-
- As of January 1, 2020 this library no longer supports Python 2 on the latest released version. +
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version. Library versions released prior to that date will continue to be available. For more information please visit Python 2 support on Google Cloud.
diff --git a/docs/aiplatform_v1/data_foundry_service.rst b/docs/aiplatform_v1/data_foundry_service.rst new file mode 100644 index 0000000000..8ba0131e8b --- /dev/null +++ b/docs/aiplatform_v1/data_foundry_service.rst @@ -0,0 +1,6 @@ +DataFoundryService +------------------------------------ + +.. automodule:: google.cloud.aiplatform_v1.services.data_foundry_service + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/gen_ai_cache_service.rst b/docs/aiplatform_v1/gen_ai_cache_service.rst new file mode 100644 index 0000000000..c3fce6f1d7 --- /dev/null +++ b/docs/aiplatform_v1/gen_ai_cache_service.rst @@ -0,0 +1,10 @@ +GenAiCacheService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.gen_ai_cache_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.gen_ai_cache_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/reasoning_engine_execution_service.rst b/docs/aiplatform_v1/reasoning_engine_execution_service.rst new file mode 100644 index 0000000000..0b482b46ad --- /dev/null +++ b/docs/aiplatform_v1/reasoning_engine_execution_service.rst @@ -0,0 +1,6 @@ +ReasoningEngineExecutionService +------------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.reasoning_engine_execution_service + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/reasoning_engine_service.rst b/docs/aiplatform_v1/reasoning_engine_service.rst new file mode 100644 index 0000000000..fe2d99615e --- /dev/null +++ b/docs/aiplatform_v1/reasoning_engine_service.rst @@ -0,0 +1,10 @@ +ReasoningEngineService +---------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.reasoning_engine_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.reasoning_engine_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/services_.rst b/docs/aiplatform_v1/services_.rst index 94af095131..f9610c33b3 100644 --- a/docs/aiplatform_v1/services_.rst +++ b/docs/aiplatform_v1/services_.rst @@ -3,6 +3,7 @@ Services for Google Cloud Aiplatform v1 API .. toctree:: :maxdepth: 2 + data_foundry_service dataset_service deployment_resource_pool_service endpoint_service @@ -12,6 +13,7 @@ Services for Google Cloud Aiplatform v1 API feature_registry_service featurestore_online_serving_service featurestore_service + gen_ai_cache_service gen_ai_tuning_service index_endpoint_service index_service @@ -26,7 +28,10 @@ Services for Google Cloud Aiplatform v1 API persistent_resource_service pipeline_service prediction_service + reasoning_engine_execution_service + reasoning_engine_service schedule_service + session_service specialist_pool_service tensorboard_service vertex_rag_data_service diff --git a/docs/aiplatform_v1/session_service.rst b/docs/aiplatform_v1/session_service.rst new file mode 100644 index 0000000000..4cd3c5ec43 --- /dev/null +++ b/docs/aiplatform_v1/session_service.rst @@ -0,0 +1,10 @@ +SessionService +-------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.session_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.session_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/example_store_service.rst b/docs/aiplatform_v1beta1/example_store_service.rst new file mode 100644 index 0000000000..6d6af4d18b --- /dev/null +++ b/docs/aiplatform_v1beta1/example_store_service.rst @@ -0,0 +1,10 @@ +ExampleStoreService +------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.example_store_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.example_store_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/memory_bank_service.rst b/docs/aiplatform_v1beta1/memory_bank_service.rst new file mode 100644 index 0000000000..89ea895f83 --- /dev/null +++ b/docs/aiplatform_v1beta1/memory_bank_service.rst @@ -0,0 +1,10 @@ +MemoryBankService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.memory_bank_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.memory_bank_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/online_evaluator_service.rst b/docs/aiplatform_v1beta1/online_evaluator_service.rst new file mode 100644 index 0000000000..8c5bf8dbd4 --- /dev/null +++ b/docs/aiplatform_v1beta1/online_evaluator_service.rst @@ -0,0 +1,10 @@ +OnlineEvaluatorService +---------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.online_evaluator_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.online_evaluator_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/services_.rst b/docs/aiplatform_v1beta1/services_.rst index dfe593df9d..0d906736bc 100644 --- a/docs/aiplatform_v1beta1/services_.rst +++ b/docs/aiplatform_v1beta1/services_.rst @@ -7,6 +7,7 @@ Services for Google Cloud Aiplatform v1beta1 API deployment_resource_pool_service endpoint_service evaluation_service + example_store_service extension_execution_service extension_registry_service feature_online_store_admin_service @@ -21,18 +22,21 @@ Services for Google Cloud Aiplatform v1beta1 API job_service llm_utility_service match_service + memory_bank_service metadata_service migration_service model_garden_service model_monitoring_service model_service notebook_service + online_evaluator_service persistent_resource_service pipeline_service prediction_service reasoning_engine_execution_service reasoning_engine_service schedule_service + session_service specialist_pool_service tensorboard_service vertex_rag_data_service diff --git a/docs/aiplatform_v1beta1/session_service.rst b/docs/aiplatform_v1beta1/session_service.rst new file mode 100644 index 0000000000..66fc540c16 --- /dev/null +++ b/docs/aiplatform_v1beta1/session_service.rst @@ -0,0 +1,10 @@ +SessionService +-------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.session_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.session_service.pagers + :members: + :inherited-members: diff --git a/docs/conf.py b/docs/conf.py index bc2466bdb5..886634f5a7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -416,6 +416,7 @@ def setup(*args, **kwargs): # 2. Giving pretty class names to the preview classes # 3. Making Sphinx automodule render the class members instead of # dismissing the exported private classes as "Alias of". 
+ from vertexai import agent_engines from vertexai import evaluation from vertexai import language_models from vertexai import vision_models @@ -430,6 +431,7 @@ def setup(*args, **kwargs): # We select the publicly exported members that have an internal module ("*._*"). # Setting the modules of the GA classes + adopt_members_reexported_from_private_modules(agent_engines) adopt_members_reexported_from_private_modules(evaluation) adopt_members_reexported_from_private_modules(language_models) adopt_members_reexported_from_private_modules(vision_models) diff --git a/docs/summary_overview.md b/docs/summary_overview.md index 7d0ca2569b..38025e91ac 100644 --- a/docs/summary_overview.md +++ b/docs/summary_overview.md @@ -1,14 +1,22 @@ -# Vertex AI API +[ +This is a templated file. Adding content to this file may result in it being +reverted. Instead, if you want to place additional content, create an +"overview_content.md" file in `docs/` directory. The Sphinx tool will +pick up on the content and merge the content. +]: # -Overview of the APIs available for Vertex AI API. +# AI Platform API + +Overview of the APIs available for AI Platform API. ## All entries -Classes, methods and properties & attributes for Vertex AI API. +Classes, methods and properties & attributes for +AI Platform API. 
[classes](https://cloud.google.com/python/docs/reference/aiplatform/latest/summary_class.html) [methods](https://cloud.google.com/python/docs/reference/aiplatform/latest/summary_method.html) [properties and -attributes](https://cloud.google.com/python/docs/reference/aiplatform/latest/summary_property.html) \ No newline at end of file +attributes](https://cloud.google.com/python/docs/reference/aiplatform/latest/summary_property.html) diff --git a/gemini_docs/README.md b/gemini_docs/README.md index e7ac132fac..ee68c98ffc 100644 --- a/gemini_docs/README.md +++ b/gemini_docs/README.md @@ -1,12 +1,9 @@ # Vertex Generative AI SDK for Python -The Vertex Generative AI SDK helps developers use Google's generative AI +The Gen AI Modules in the Vertex SDK help developers use Google's generative AI [Gemini models](http://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/overview) -to build AI-powered features and applications. -The SDKs support use cases like the following: +to build AI-powered features and applications in Vertex. -- Generate text from texts, images and videos (multimodal generation) -- Build stateful multi-turn conversations (chat) -- Function calling +The modules currently available are: Evaluation, Agent Engines, Prompt Management, and Prompt Optimization. See below for instructions on getting started with each module. For other Gemini features on Vertex, use the [Gen AI SDK](https://github.com/googleapis/python-genai). 
## Installation @@ -15,12 +12,264 @@ To install the Python package, run the following command: ```shell -pip3 install --upgrade --user "google-cloud-aiplatform>=1.38" +pip3 install --upgrade --user "google-cloud-aiplatform>=1.114.0" ``` -## Usage +#### Imports: +```python +import vertexai +from vertexai import types +``` + +#### Client initialization + +```python +client = vertexai.Client(project='my-project', location='us-central1') +``` + +#### Gen AI Evaluation + +To run evaluation, first generate model responses from a set of prompts. + +```python +import pandas as pd + +prompts_df = pd.DataFrame({ + "prompt": [ + "What is the capital of France?", + "Write a haiku about a cat.", + "Write a Python function to calculate the factorial of a number.", + "Translate 'How are you?' to French.", + ], +}) + +inference_results = client.evals.run_inference( + model="gemini-2.5-flash", + ("def factorial(n):\n" + " if n < 0:\n" + " return 'Factorial does not exist for negative numbers'\n" + " elif n == 0:\n" + " return 1\n" + " else:\n" + " fact = 1\n" + " i = 1\n" + " while i <= n:\n" + " fact *= i\n" + " i += 1\n" + " return fact"), +) +inference_results.show() +``` + +Then run evaluation by providing the inference results and specifying the metric types. + +```python +eval_result = client.evals.evaluate( + dataset=inference_results, + metrics=[ + types.RubricMetric.GENERAL_QUALITY, + ] +) +eval_result.show() +``` + +#### Agent Engine with Agent Development Kit (ADK) + +First, define a function that looks up the exchange rate: + +```python +def get_exchange_rate( + currency_from: str = "USD", + currency_to: str = "EUR", + currency_date: str = "latest", +): + """Retrieves the exchange rate between two currencies on a specified date. + + Uses the Frankfurter API (https://api.frankfurter.app/) to obtain + exchange rate data. + + Returns: + dict: A dictionary containing the exchange rate information. 
+ Example: {"amount": 1.0, "base": "USD", "date": "2023-11-24", + "rates": {"EUR": 0.95534}} + """ + import requests + response = requests.get( + f"https://api.frankfurter.app/{currency_date}", + params={"from": currency_from, "to": currency_to}, + ) + return response.json() +``` + +Next, define an ADK Agent: + +```python + +from google.adk.agents import Agent +from vertexai.agent_engines import AdkApp + +app = AdkApp(agent=Agent( + model="gemini-2.0-flash", # Required. + name='currency_exchange_agent', # Required. + tools=[get_exchange_rate], # Optional. +)) +``` + +Test the agent locally using US dollars and Swedish Krona: + +```python +async for event in app.async_stream_query( + user_id="user-id", + message="What is the exchange rate from US dollars to SEK today?", +): + print(event) +``` + +To deploy the agent to Agent Engine: + +```python +remote_app = client.agent_engines.create( + agent=app, + config={ + "requirements": ["google-cloud-aiplatform[agent_engines,adk]"], + }, +) +``` + +You can also run queries against the deployed agent: + +```python +async for event in remote_app.async_stream_query( + user_id="user-id", + message="What is the exchange rate from US dollars to SEK today?", +): + print(event) +``` + +#### Prompt Optimization + +To do a zero-shot prompt optimization, use the `optimize_prompt` +method. + +```python +prompt = "Generate system instructions for a question-answering assistant" +response = client.prompt_optimizer.optimize_prompt(prompt=prompt) +print(response.raw_text_response) +if response.parsed_response: + print(response.parsed_response.suggested_prompt) +``` + +To call the data-driven prompt optimization, call the `optimize` method. +In this case however, we need to provide `vapo_config`. This config needs to +have either service account or project **number** and the config path. 
+Please refer to this [tutorial](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/prompts/data-driven-optimizer) +for more details on config parameter. + +```python +import logging + +project_number = PROJECT_NUMBER # replace with your project number +service_account = f"{project_number}-compute@developer.gserviceaccount.com" + +vapo_config = types.PromptOptimizerVAPOConfig( + config_path="gs://your-bucket/config.json", + service_account_project_number=project_number, + wait_for_completion=False +) + +# Set up logging to see the progress of the optimization job +logging.basicConfig(encoding='utf-8', level=logging.INFO, force=True) + +result = client.prompt_optimizer.optimize(method="vapo", config=vapo_config) +``` + +We can also call optimize method async. + +```python +await client.aio.prompt_optimizer.optimize(method="vapo", config=vapo_config) +``` + +#### Prompt Management + +First define your prompt as a dictionary or `types.Prompt` object. Then call +`create()`. + +```python +prompt = { + "prompt_data": { + "contents": [{"parts": [{"text": "Hello, {name}! How are you?"}]}], + "system_instruction": {"parts": [{"text": "Please answer in a short sentence."}]}, + "variables": [ + {"name": {"text": "Alice"}}, + ], + "model": "gemini-2.5-flash", + }, +} + +prompt_resource = client.prompts.create( + prompt=prompt, +) +``` + +Note that you can also use the `types.Prompt` object to define your prompt. Some +of the types used to do this are from the [Gen AI SDK](https://github.com/googleapis/python-genai). + +```python +import types +from google.genai import types as genai_types + +prompt = types.Prompt( + prompt_data=types.PromptData( + contents=[genai_types.Content(parts=[genai_types.Part(text="Hello, {name}! 
How are you?")])], + system_instruction=genai_types.Content(parts=[genai_types.Part(text="Please answer in a short sentence.")]), + variables=[ + {"name": genai_types.Part(text="Alice")}, + ], + model="gemini-2.5-flash", + ), +) +``` + +Retrieve a prompt by calling `get()` with the `prompt_id`. + +```python +retrieved_prompt = client.prompts.get(prompt_id=prompt_resource.prompt_id) +``` + +After creating or retrieving a prompt, you can call `generate_content()` with +that prompt using the Gen AI SDK. + +The following uses a utility function available on Prompt objects to transform a +Prompt object into a list of Content objects for use with `generate_content`. To +run this you need to have the Gen AI SDK installed, which you can do via +`pip install google-genai`. + +```python +from google import genai +from google.genai import types as genai_types + +# Create a Client in the Gen AI SDK +genai_client = genai.Client(vertexai=True, project="your-project", location="your-location") + +# Call generate_content() with the prompt +response = genai_client.models.generate_content( + model=retrieved_prompt.prompt_data.model, + contents=retrieved_prompt.assemble_contents(), +) +``` + +## Warning + +The following Generative AI modules in the Vertex AI SDK are deprecated as of +June 24, 2025 and will be removed on June 24, 2026: +`vertexai.generative_models`, `vertexai.language_models`, +`vertexai.vision_models`, `vertexai.tuning`, `vertexai.caching`. Please use the +[Google Gen AI SDK](https://pypi.org/project/google-genai/) to access these +features. See +[the migration guide](https://cloud.google.com/vertex-ai/generative-ai/docs/deprecations/genai-vertexai-sdk) +for details. You can continue using all other Vertex AI SDK modules, as they are +the recommended way to use the API. 
-For detailed instructions, see [quickstart](http://cloud.google.com/vertex-ai/docs/generative-ai/start/quickstarts/quickstart-multimodal) and [Introduction to multimodal classes in the Vertex AI SDK](http://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/sdk-for-gemini/gemini-sdk-overview-reference). #### Imports: ```python @@ -313,6 +562,88 @@ result = EvalTask( ) ``` +#### Agent Engine + +Before you begin, install the packages with + +```shell +pip3 install --upgrade --user "google-cloud-aiplatform[agent_engines,adk]>=1.111" +``` + +First, define a function that looks up the exchange rate: + +```python +def get_exchange_rate( + currency_from: str = "USD", + currency_to: str = "EUR", + currency_date: str = "latest", +): + """Retrieves the exchange rate between two currencies on a specified date. + + Uses the Frankfurter API (https://api.frankfurter.app/) to obtain + exchange rate data. + + Returns: + dict: A dictionary containing the exchange rate information. + Example: {"amount": 1.0, "base": "USD", "date": "2023-11-24", + "rates": {"EUR": 0.95534}} + """ + import requests + response = requests.get( + f"https://api.frankfurter.app/{currency_date}", + params={"from": currency_from, "to": currency_to}, + ) + return response.json() +``` + +Next, define an ADK Agent: + +```python +from google.adk.agents import Agent +from vertexai.agent_engines import AdkApp + +app = AdkApp(agent=Agent( + model="gemini-2.0-flash", # Required. + name='currency_exchange_agent', # Required. + tools=[get_exchange_rate], # Optional. 
+)) +``` + +Test the agent locally using US dollars and Swedish Krona: + +```python +async for event in app.async_stream_query( + user_id="user-id", + message="What is the exchange rate from US dollars to SEK today?", +): + print(event) +``` + +To deploy the agent to Agent Engine: + +```python +vertexai.init( + project='my-project', + location='us-central1', + staging_bucket="gs://my-staging-bucket", +) + +remote_app = vertexai.agent_engines.create( + app, + requirements=["google-cloud-aiplatform[agent_engines,adk]"], +) +``` + +You can also run queries against the deployed agent: + +```python +async for event in remote_app.async_stream_query( + user_id="user-id", + message="What is the exchange rate from US dollars to SEK today?", +): + print(event) +``` + ## Documentation You can find complete documentation for the Vertex AI SDKs and the Gemini model in the Google Cloud [documentation](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) diff --git a/gemini_docs/conf.py b/gemini_docs/conf.py index 1b55aa99c4..82b45d366f 100644 --- a/gemini_docs/conf.py +++ b/gemini_docs/conf.py @@ -416,6 +416,7 @@ def setup(*args, **kwargs): # 2. Giving pretty class names to the preview classes # 3. Making Sphinx automodule render the class members instead of # dismissing the exported private classes as "Alias of". + from vertexai import agent_engines from vertexai import evaluation from vertexai import language_models from vertexai import vision_models @@ -430,6 +431,7 @@ def setup(*args, **kwargs): # We select the publicly exported members that have an internal module ("*._*"). 
# Setting the modules of the GA classes + adopt_members_reexported_from_private_modules(agent_engines) adopt_members_reexported_from_private_modules(evaluation) adopt_members_reexported_from_private_modules(language_models) adopt_members_reexported_from_private_modules(vision_models) diff --git a/gemini_docs/vertexai/vertexai.rst b/gemini_docs/vertexai/vertexai.rst index 3496c44afc..085054b801 100644 --- a/gemini_docs/vertexai/vertexai.rst +++ b/gemini_docs/vertexai/vertexai.rst @@ -6,6 +6,93 @@ Vertex AI SDK :show-inheritance: :inherited-members: + .. autoattribute:: types + +.. autoclass:: vertexai.Client + :members: + :undoc-members: + :show-inheritance: + :inherited-members: + + .. autoattribute:: evals + .. autoattribute:: agent_engines + .. autoattribute:: prompt_optimizer + .. autoattribute:: prompts + +.. automodule:: vertexai._genai.evals + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: vertexai._genai.prompt_optimizer + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: vertexai._genai.prompts + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: vertexai._genai.agent_engines + :members: + :undoc-members: + :show-inheritance: + + .. autoclass:: vertexai._genai.agent_engines.AgentEngines + :members: + .. autoattribute:: memories + .. autoattribute:: sessions + .. autoattribute:: sandboxes + + Sessions + -------- + .. automodule:: vertexai._genai.sessions + :members: + :undoc-members: + :show-inheritance: + + .. autoclass:: vertexai._genai.sessions.Sessions + :members: + .. autoattribute:: events + + Events + ~~~~~~ + .. automodule:: vertexai._genai.session_events + :members: + :undoc-members: + :show-inheritance: + + Memories + -------- + .. automodule:: vertexai._genai.memories + :members: + :undoc-members: + :show-inheritance: + + .. autoclass:: vertexai._genai.memories.Memories + :members: + .. autoattribute:: revisions + + Revisions + ~~~~~~~~~ + .. 
automodule:: vertexai._genai.memory_revisions + :members: + :undoc-members: + :show-inheritance: + + Sandboxes + -------- + .. automodule:: vertexai._genai.sandboxes + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: vertexai._genai.types + :members: + :undoc-members: + :show-inheritance: + .. automodule:: vertexai.generative_models :members: :show-inheritance: @@ -60,6 +147,11 @@ Vertex AI SDK :show-inheritance: :inherited-members: +.. automodule:: vertexai.agent_engines + :members: + :show-inheritance: + :inherited-members: + .. automodule:: vertexai.preview.reasoning_engines :members: :show-inheritance: diff --git a/google/cloud/aiplatform/_streaming_prediction.py b/google/cloud/aiplatform/_streaming_prediction.py index 16a1d1331b..9225519c26 100644 --- a/google/cloud/aiplatform/_streaming_prediction.py +++ b/google/cloud/aiplatform/_streaming_prediction.py @@ -130,7 +130,9 @@ async def predict_stream_of_tensor_lists_from_single_tensor_list_async( inputs=tensor_list, parameters=parameters_tensor, ) - async for response in await prediction_service_async_client.server_streaming_predict( + async for ( + response + ) in await prediction_service_async_client.server_streaming_predict( request=request ): yield response.outputs @@ -183,7 +185,9 @@ async def predict_stream_of_dict_lists_from_single_dict_list_async( """ tensor_list = [value_to_tensor(d) for d in dict_list] parameters_tensor = value_to_tensor(parameters) if parameters else None - async for tensor_list in predict_stream_of_tensor_lists_from_single_tensor_list_async( + async for ( + tensor_list + ) in predict_stream_of_tensor_lists_from_single_tensor_list_async( prediction_service_async_client=prediction_service_async_client, endpoint_name=endpoint_name, tensor_list=tensor_list, diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index 778e0f4c67..cca3d69064 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -639,7 +639,6 @@ def 
_get_and_validate_project_location( project: Optional[str] = None, location: Optional[str] = None, ) -> Tuple[str, str]: - """Validate the project and location for the resource. Args: @@ -1505,14 +1504,10 @@ class PreviewMixin(abc.ABC): class allows the child class to introduce preview features. """ - @classmethod - @property - @abc.abstractmethod - def _preview_class(cls: Type[PreviewClass]) -> Type[PreviewClass]: - """Class that is currently in preview or has a preview feature. - Class must have `resource_name` and `credentials` attributes. - """ - pass + _preview_class: Type[PreviewClass] + """Class that is currently in preview or has a preview feature. + Class must have `resource_name` and `credentials` attributes. + """ @property def preview(self) -> PreviewClass: diff --git a/google/cloud/aiplatform/compat/__init__.py b/google/cloud/aiplatform/compat/__init__.py index de1d1e9ded..2faacba172 100644 --- a/google/cloud/aiplatform/compat/__init__.py +++ b/google/cloud/aiplatform/compat/__init__.py @@ -181,8 +181,7 @@ services.featurestore_online_serving_service_client_v1 ) services.featurestore_service_client = services.featurestore_service_client_v1 - # TODO(b/342585299): Temporary code. Switch to v1 once v1 is available. - services.gen_ai_cache_service_client = services.gen_ai_cache_service_client_v1beta1 + services.gen_ai_cache_service_client = services.gen_ai_cache_service_client_v1 services.job_service_client = services.job_service_client_v1 services.model_garden_service_client = services.model_garden_service_client_v1 services.model_service_client = services.model_service_client_v1 @@ -203,8 +202,7 @@ types.annotation_spec = types.annotation_spec_v1 types.artifact = types.artifact_v1 types.batch_prediction_job = types.batch_prediction_job_v1 - # TODO(b/342585299): Temporary code. Switch to v1 once v1 is available. 
- types.cached_content = types.cached_content_v1beta1 + types.cached_content = types.cached_content_v1 types.completion_stats = types.completion_stats_v1 types.context = types.context_v1 types.custom_job = types.custom_job_v1 diff --git a/google/cloud/aiplatform/compat/services/__init__.py b/google/cloud/aiplatform/compat/services/__init__.py index 3d4d5df0cf..d69dcc83a1 100644 --- a/google/cloud/aiplatform/compat/services/__init__.py +++ b/google/cloud/aiplatform/compat/services/__init__.py @@ -15,6 +15,94 @@ # limitations under the License. # +from google.cloud.aiplatform_v1.services.dataset_service import ( + client as dataset_service_client_v1, +) +from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( + client as deployment_resource_pool_service_client_v1, +) +from google.cloud.aiplatform_v1.services.endpoint_service import ( + client as endpoint_service_client_v1, +) +from google.cloud.aiplatform_v1.services.feature_online_store_admin_service import ( + client as feature_online_store_admin_service_client_v1, +) +from google.cloud.aiplatform_v1.services.feature_online_store_service import ( + client as feature_online_store_service_client_v1, +) +from google.cloud.aiplatform_v1.services.feature_registry_service import ( + client as feature_registry_service_client_v1, +) +from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import ( + client as featurestore_online_serving_service_client_v1, +) +from google.cloud.aiplatform_v1.services.featurestore_service import ( + client as featurestore_service_client_v1, +) +from google.cloud.aiplatform_v1.services.gen_ai_cache_service import ( + client as gen_ai_cache_service_client_v1, +) +from google.cloud.aiplatform_v1.services.index_endpoint_service import ( + client as index_endpoint_service_client_v1, +) +from google.cloud.aiplatform_v1.services.index_service import ( + client as index_service_client_v1, +) +from google.cloud.aiplatform_v1.services.job_service import 
( + client as job_service_client_v1, +) +from google.cloud.aiplatform_v1.services.metadata_service import ( + client as metadata_service_client_v1, +) +from google.cloud.aiplatform_v1.services.model_garden_service import ( + client as model_garden_service_client_v1, +) +from google.cloud.aiplatform_v1.services.model_service import ( + client as model_service_client_v1, +) +from google.cloud.aiplatform_v1.services.persistent_resource_service import ( + client as persistent_resource_service_client_v1, +) +from google.cloud.aiplatform_v1.services.pipeline_service import ( + client as pipeline_service_client_v1, +) +from google.cloud.aiplatform_v1.services.prediction_service import ( + async_client as prediction_service_async_client_v1, +) +from google.cloud.aiplatform_v1.services.prediction_service import ( + client as prediction_service_client_v1, +) +from google.cloud.aiplatform_v1.services.reasoning_engine_execution_service import ( + async_client as reasoning_engine_execution_async_client_v1, +) +from google.cloud.aiplatform_v1.services.reasoning_engine_execution_service import ( + client as reasoning_engine_execution_service_client_v1, +) +from google.cloud.aiplatform_v1.services.reasoning_engine_service import ( + client as reasoning_engine_service_client_v1, +) +from google.cloud.aiplatform_v1.services.schedule_service import ( + client as schedule_service_client_v1, +) +from google.cloud.aiplatform_v1.services.specialist_pool_service import ( + client as specialist_pool_service_client_v1, +) +from google.cloud.aiplatform_v1.services.tensorboard_service import ( + client as tensorboard_service_client_v1, +) +from google.cloud.aiplatform_v1.services.vertex_rag_data_service import ( + async_client as vertex_rag_data_service_async_client_v1, +) +from google.cloud.aiplatform_v1.services.vertex_rag_data_service import ( + client as vertex_rag_data_service_client_v1, +) +from google.cloud.aiplatform_v1.services.vertex_rag_service import ( + async_client as 
vertex_rag_service_async_client_v1, + client as vertex_rag_service_client_v1, +) +from google.cloud.aiplatform_v1.services.vizier_service import ( + client as vizier_service_client_v1, +) from google.cloud.aiplatform_v1beta1.services.dataset_service import ( client as dataset_service_client_v1beta1, ) @@ -24,18 +112,21 @@ from google.cloud.aiplatform_v1beta1.services.endpoint_service import ( client as endpoint_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.example_store_service import ( + client as example_store_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.extension_execution_service import ( client as extension_execution_service_client_v1beta1, ) from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( client as extension_registry_service_client_v1beta1, ) -from google.cloud.aiplatform_v1beta1.services.feature_online_store_service import ( - client as feature_online_store_service_client_v1beta1, -) from google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service import ( client as feature_online_store_admin_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.feature_online_store_service import ( + client as feature_online_store_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.feature_registry_service import ( client as feature_registry_service_client_v1beta1, ) @@ -48,12 +139,12 @@ from google.cloud.aiplatform_v1beta1.services.gen_ai_cache_service import ( client as gen_ai_cache_service_client_v1beta1, ) -from google.cloud.aiplatform_v1beta1.services.index_service import ( - client as index_service_client_v1beta1, -) from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import ( client as index_endpoint_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.index_service import ( + client as index_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.job_service import ( 
client as job_service_client_v1beta1, ) @@ -66,30 +157,30 @@ from google.cloud.aiplatform_v1beta1.services.model_garden_service import ( client as model_garden_service_client_v1beta1, ) -from google.cloud.aiplatform_v1beta1.services.model_service import ( - client as model_service_client_v1beta1, -) from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( client as model_monitoring_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.model_service import ( + client as model_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.persistent_resource_service import ( client as persistent_resource_service_client_v1beta1, ) from google.cloud.aiplatform_v1beta1.services.pipeline_service import ( client as pipeline_service_client_v1beta1, ) -from google.cloud.aiplatform_v1beta1.services.prediction_service import ( - client as prediction_service_client_v1beta1, -) from google.cloud.aiplatform_v1beta1.services.prediction_service import ( async_client as prediction_service_async_client_v1beta1, ) -from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( - client as reasoning_engine_service_client_v1beta1, +from google.cloud.aiplatform_v1beta1.services.prediction_service import ( + client as prediction_service_client_v1beta1, ) from google.cloud.aiplatform_v1beta1.services.reasoning_engine_execution_service import ( client as reasoning_engine_execution_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.reasoning_engine_service import ( + client as reasoning_engine_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.schedule_service import ( client as schedule_service_client_v1beta1, ) @@ -100,95 +191,19 @@ client as tensorboard_service_client_v1beta1, ) from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( - client as vertex_rag_data_service_client_v1beta1, + async_client as vertex_rag_data_service_async_client_v1beta1, ) from 
google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( - async_client as vertex_rag_data_service_async_client_v1beta1, + client as vertex_rag_data_service_client_v1beta1, ) from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import ( + async_client as vertex_rag_service_async_client_v1beta1, client as vertex_rag_service_client_v1beta1, ) from google.cloud.aiplatform_v1beta1.services.vizier_service import ( client as vizier_service_client_v1beta1, ) - -from google.cloud.aiplatform_v1.services.dataset_service import ( - client as dataset_service_client_v1, -) -from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import ( - client as deployment_resource_pool_service_client_v1, -) -from google.cloud.aiplatform_v1.services.endpoint_service import ( - client as endpoint_service_client_v1, -) -from google.cloud.aiplatform_v1.services.feature_online_store_service import ( - client as feature_online_store_service_client_v1, -) -from google.cloud.aiplatform_v1.services.feature_online_store_admin_service import ( - client as feature_online_store_admin_service_client_v1, -) -from google.cloud.aiplatform_v1.services.feature_registry_service import ( - client as feature_registry_service_client_v1, -) -from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import ( - client as featurestore_online_serving_service_client_v1, -) -from google.cloud.aiplatform_v1.services.featurestore_service import ( - client as featurestore_service_client_v1, -) -from google.cloud.aiplatform_v1.services.index_service import ( - client as index_service_client_v1, -) -from google.cloud.aiplatform_v1.services.index_endpoint_service import ( - client as index_endpoint_service_client_v1, -) -from google.cloud.aiplatform_v1.services.job_service import ( - client as job_service_client_v1, -) -from google.cloud.aiplatform_v1.services.metadata_service import ( - client as metadata_service_client_v1, -) -from 
google.cloud.aiplatform_v1.services.model_garden_service import ( - client as model_garden_service_client_v1, -) -from google.cloud.aiplatform_v1.services.model_service import ( - client as model_service_client_v1, -) -from google.cloud.aiplatform_v1.services.persistent_resource_service import ( - client as persistent_resource_service_client_v1, -) -from google.cloud.aiplatform_v1.services.pipeline_service import ( - client as pipeline_service_client_v1, -) -from google.cloud.aiplatform_v1.services.prediction_service import ( - client as prediction_service_client_v1, -) -from google.cloud.aiplatform_v1.services.prediction_service import ( - async_client as prediction_service_async_client_v1, -) -from google.cloud.aiplatform_v1.services.schedule_service import ( - client as schedule_service_client_v1, -) -from google.cloud.aiplatform_v1.services.specialist_pool_service import ( - client as specialist_pool_service_client_v1, -) -from google.cloud.aiplatform_v1.services.tensorboard_service import ( - client as tensorboard_service_client_v1, -) -from google.cloud.aiplatform_v1.services.vizier_service import ( - client as vizier_service_client_v1, -) -from google.cloud.aiplatform_v1.services.vertex_rag_service import ( - client as vertex_rag_service_client_v1, -) -from google.cloud.aiplatform_v1.services.vertex_rag_data_service import ( - client as vertex_rag_data_service_client_v1, -) -from google.cloud.aiplatform_v1.services.vertex_rag_data_service import ( - async_client as vertex_rag_data_service_async_client_v1, -) - __all__ = ( # v1 dataset_service_client_v1, @@ -209,17 +224,21 @@ pipeline_service_client_v1, prediction_service_client_v1, prediction_service_async_client_v1, + reasoning_engine_execution_service_client_v1, + reasoning_engine_service_client_v1, schedule_service_client_v1, specialist_pool_service_client_v1, tensorboard_service_client_v1, vizier_service_client_v1, vertex_rag_data_service_async_client_v1, vertex_rag_data_service_client_v1, + 
vertex_rag_service_async_client_v1, vertex_rag_service_client_v1, # v1beta1 dataset_service_client_v1beta1, deployment_resource_pool_service_client_v1beta1, endpoint_service_client_v1beta1, + example_store_service_client_v1beta1, feature_online_store_service_client_v1beta1, feature_online_store_admin_service_client_v1beta1, feature_registry_service_client_v1beta1, @@ -242,6 +261,7 @@ specialist_pool_service_client_v1beta1, metadata_service_client_v1beta1, tensorboard_service_client_v1beta1, + vertex_rag_service_async_client_v1beta1, vertex_rag_service_client_v1beta1, vertex_rag_data_service_client_v1beta1, vertex_rag_data_service_async_client_v1beta1, diff --git a/google/cloud/aiplatform/compat/types/__init__.py b/google/cloud/aiplatform/compat/types/__init__.py index 210dfde124..14c74fbd91 100644 --- a/google/cloud/aiplatform/compat/types/__init__.py +++ b/google/cloud/aiplatform/compat/types/__init__.py @@ -118,6 +118,7 @@ annotation_spec as annotation_spec_v1, artifact as artifact_v1, batch_prediction_job as batch_prediction_job_v1, + cached_content as cached_content_v1, completion_stats as completion_stats_v1, context as context_v1, custom_job as custom_job_v1, diff --git a/google/cloud/aiplatform/constants/base.py b/google/cloud/aiplatform/constants/base.py index 5226b41a39..3abf032874 100644 --- a/google/cloud/aiplatform/constants/base.py +++ b/google/cloud/aiplatform/constants/base.py @@ -28,12 +28,14 @@ "asia-northeast2", "asia-northeast3", "asia-south1", + "asia-south2", "asia-southeast1", "asia-southeast2", "australia-southeast1", "australia-southeast2", "europe-central2", "europe-north1", + "europe-north2", "europe-southwest1", "europe-west1", "europe-west2", @@ -43,6 +45,7 @@ "europe-west8", "europe-west9", "europe-west12", + "global", "me-central1", "me-central2", "me-west1", @@ -54,11 +57,13 @@ "us-east1", "us-east4", "us-east5", + "us-east7", "us-south1", "us-west1", "us-west2", "us-west3", "us-west4", + "us-west8", } ) diff --git 
a/google/cloud/aiplatform/constants/prediction.py b/google/cloud/aiplatform/constants/prediction.py index 5f7e8bcd3f..88ae2fd5ed 100644 --- a/google/cloud/aiplatform/constants/prediction.py +++ b/google/cloud/aiplatform/constants/prediction.py @@ -34,6 +34,12 @@ XGBOOST = "xgboost" XGBOOST_CONTAINER_URIS = [ + "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-1:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-1:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-1:latest", + "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-0:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-0:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-0:latest", "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-7:latest", "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-7:latest", "asia-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-7:latest", @@ -64,6 +70,15 @@ ] SKLEARN_CONTAINER_URIS = [ + "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-6:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-6:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-6:latest", + "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-5:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-5:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-5:latest", + "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-4:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-4:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-4:latest", "us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-3:latest", "europe-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-3:latest", "asia-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-3:latest", @@ -88,6 +103,18 @@ ] TF_CONTAINER_URIS = [ + "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-15:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-15:latest", + 
"asia-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-15:latest", + "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-15:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-15:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-15:latest", + "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-14:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-14:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-14:latest", + "us-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-14:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-14:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/tf2-gpu.2-14:latest", "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-13:latest", "europe-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-13:latest", "asia-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-13:latest", @@ -175,6 +202,24 @@ ] PYTORCH_CONTAINER_URIS = [ + "us-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-4:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-4:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-4:latest", + "us-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-4:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-4:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-4:latest", + "us-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-3:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-3:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-3:latest", + "us-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-3:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-3:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-3:latest", + "us-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-2:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-2:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-2:latest", + 
"us-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-2:latest", + "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-2:latest", + "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-gpu.2-2:latest", "us-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-1:latest", "europe-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-1:latest", "asia-docker.pkg.dev/vertex-ai/prediction/pytorch-cpu.2-1:latest", diff --git a/google/cloud/aiplatform/datasets/__init__.py b/google/cloud/aiplatform/datasets/__init__.py index 0f6b7f42fa..10d2663ea9 100644 --- a/google/cloud/aiplatform/datasets/__init__.py +++ b/google/cloud/aiplatform/datasets/__init__.py @@ -16,9 +16,13 @@ # from google.cloud.aiplatform.datasets.dataset import _Dataset -from google.cloud.aiplatform.datasets.column_names_dataset import _ColumnNamesDataset +from google.cloud.aiplatform.datasets.column_names_dataset import ( + _ColumnNamesDataset, +) from google.cloud.aiplatform.datasets.tabular_dataset import TabularDataset -from google.cloud.aiplatform.datasets.time_series_dataset import TimeSeriesDataset +from google.cloud.aiplatform.datasets.time_series_dataset import ( + TimeSeriesDataset, +) from google.cloud.aiplatform.datasets.image_dataset import ImageDataset from google.cloud.aiplatform.datasets.text_dataset import TextDataset from google.cloud.aiplatform.datasets.video_dataset import VideoDataset diff --git a/google/cloud/aiplatform/docker_utils/build.py b/google/cloud/aiplatform/docker_utils/build.py index f8cbf500a2..acf7ca0393 100644 --- a/google/cloud/aiplatform/docker_utils/build.py +++ b/google/cloud/aiplatform/docker_utils/build.py @@ -223,7 +223,7 @@ def _prepare_exposed_ports(exposed_ports: Optional[List[int]] = None) -> str: def _prepare_environment_variables( - environment_variables: Optional[Dict[str, str]] = None + environment_variables: Optional[Dict[str, str]] = None, ) -> str: """Returns the Dockerfile entries required to set environment variables in containers. 
@@ -418,6 +418,7 @@ def build_image( pip_command: str = "pip", python_command: str = "python", no_cache: bool = True, + platform: Optional[str] = None, **kwargs, ) -> Image: """Builds a Docker image. @@ -459,6 +460,10 @@ def build_image( reduces the image building time. See https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#leverage-build-cache for more details. + platform (str): + Optional. The target platform for the Docker image build. See + https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images + for more details. **kwargs: Other arguments to pass to underlying method that generates the Dockerfile. @@ -472,9 +477,14 @@ def build_image( tag_options = ["-t", output_image_name] cache_args = ["--no-cache"] if no_cache else [] + platform_args = ["--platform", platform] if platform is not None else [] command = ( - ["docker", "build"] + cache_args + tag_options + ["--rm", "-f-", host_workdir] + ["docker", "build"] + + cache_args + + platform_args + + tag_options + + ["--rm", "-f-", host_workdir] ) requirements_relative_path = _get_relative_path_to_workdir( diff --git a/google/cloud/aiplatform/docker_utils/run.py b/google/cloud/aiplatform/docker_utils/run.py index 496d977a7d..155ee39f3a 100644 --- a/google/cloud/aiplatform/docker_utils/run.py +++ b/google/cloud/aiplatform/docker_utils/run.py @@ -30,7 +30,9 @@ ) from google.cloud.aiplatform.constants import prediction -from google.cloud.aiplatform.docker_utils.utils import DEFAULT_MOUNTED_MODEL_DIRECTORY +from google.cloud.aiplatform.docker_utils.utils import ( + DEFAULT_MOUNTED_MODEL_DIRECTORY, +) from google.cloud.aiplatform.utils import prediction_utils _logger = logging.getLogger(__name__) diff --git a/google/cloud/aiplatform/explain/lit.py b/google/cloud/aiplatform/explain/lit.py index 6d388f4559..4e2ca9c1b3 100644 --- a/google/cloud/aiplatform/explain/lit.py +++ b/google/cloud/aiplatform/explain/lit.py @@ -311,7 +311,9 @@ def _set_up_attribution_explainer( """ 
try: import explainable_ai_sdk - from explainable_ai_sdk.metadata.tf.v2 import SavedModelMetadataBuilder + from google3.third_party.explainable_ai_sdk.sdk.metadata.tf.v2.saved_model_metadata_builder import ( + SavedModelMetadataBuilder, + ) except ImportError: logging.info( "Skipping explanations because the Explainable AI SDK is not installed." diff --git a/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py b/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py index c9fc2d0e22..ab98600c0d 100644 --- a/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py +++ b/google/cloud/aiplatform/explain/metadata/tf/v1/saved_model_metadata_builder.py @@ -126,7 +126,7 @@ def get_metadata_protobuf(self) -> explanation_metadata.ExplanationMetadata: def _create_input_metadata_from_signature( - signature_inputs: Dict[str, "tf.Tensor"] # noqa: F821 + signature_inputs: Dict[str, "tf.Tensor"], # noqa: F821 ) -> Dict[str, explanation_metadata.ExplanationMetadata.InputMetadata]: """Creates InputMetadata from signature inputs. 
diff --git a/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py b/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py index 7d19e5680d..2df03b3044 100644 --- a/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py +++ b/google/cloud/aiplatform/explain/metadata/tf/v2/saved_model_metadata_builder.py @@ -75,9 +75,7 @@ def __init__( signature_name ) - def _infer_metadata_entries_from_model( - self, signature_name: str - ) -> Tuple[ + def _infer_metadata_entries_from_model(self, signature_name: str) -> Tuple[ Dict[str, explanation_metadata.ExplanationMetadata.InputMetadata], Dict[str, explanation_metadata.ExplanationMetadata.OutputMetadata], ]: @@ -107,10 +105,10 @@ def _infer_metadata_entries_from_model( output_mds = {} for name in output_sig: if not self._explain_output or self._explain_output[0] == name: - output_mds[ - name - ] = explanation_metadata.ExplanationMetadata.OutputMetadata( - output_tensor_name=name, + output_mds[name] = ( + explanation_metadata.ExplanationMetadata.OutputMetadata( + output_tensor_name=name, + ) ) break else: diff --git a/google/cloud/aiplatform/featurestore/_entity_type.py b/google/cloud/aiplatform/featurestore/_entity_type.py index bfbaa494e4..6891c6a599 100644 --- a/google/cloud/aiplatform/featurestore/_entity_type.py +++ b/google/cloud/aiplatform/featurestore/_entity_type.py @@ -116,11 +116,11 @@ def __init__( ) self._gca_resource = self._get_gca_resource( resource_name=entity_type_name, - parent_resource_name_fields={ - featurestore.Featurestore._resource_noun: featurestore_id - } - if featurestore_id - else featurestore_id, + parent_resource_name_fields=( + {featurestore.Featurestore._resource_noun: featurestore_id} + if featurestore_id + else featurestore_id + ), ) self._featurestore_online_client = self._instantiate_featurestore_online_client( @@ -1842,7 +1842,7 @@ def _apply_feature_timestamp( @staticmethod def _is_timestamp( - timestamp: 
Union[datetime.datetime, timestamp_pb2.Timestamp] + timestamp: Union[datetime.datetime, timestamp_pb2.Timestamp], ) -> bool: return isinstance(timestamp, datetime.datetime) or isinstance( timestamp, timestamp_pb2.Timestamp diff --git a/google/cloud/aiplatform/featurestore/feature.py b/google/cloud/aiplatform/featurestore/feature.py index dd53b7756c..39b8f8d812 100644 --- a/google/cloud/aiplatform/featurestore/feature.py +++ b/google/cloud/aiplatform/featurestore/feature.py @@ -114,12 +114,14 @@ def __init__( ) self._gca_resource = self._get_gca_resource( resource_name=feature_name, - parent_resource_name_fields={ - featurestore.Featurestore._resource_noun: featurestore_id, - featurestore.EntityType._resource_noun: entity_type_id, - } - if featurestore_id - else featurestore_id, + parent_resource_name_fields=( + { + featurestore.Featurestore._resource_noun: featurestore_id, + featurestore.EntityType._resource_noun: entity_type_id, + } + if featurestore_id + else featurestore_id + ), ) def _get_featurestore_name(self) -> str: @@ -338,11 +340,11 @@ def list( resource_noun=featurestore.EntityType._resource_noun, parse_resource_name_method=featurestore.EntityType._parse_resource_name, format_resource_name_method=featurestore.EntityType._format_resource_name, - parent_resource_name_fields={ - featurestore.Featurestore._resource_noun: featurestore_id - } - if featurestore_id - else featurestore_id, + parent_resource_name_fields=( + {featurestore.Featurestore._resource_noun: featurestore_id} + if featurestore_id + else featurestore_id + ), project=project, location=location, resource_id_validator=featurestore.EntityType._resource_id_validator, @@ -585,11 +587,11 @@ def create( resource_noun=featurestore.EntityType._resource_noun, parse_resource_name_method=featurestore.EntityType._parse_resource_name, format_resource_name_method=featurestore.EntityType._format_resource_name, - parent_resource_name_fields={ - featurestore.Featurestore._resource_noun: featurestore_id - } - 
if featurestore_id - else featurestore_id, + parent_resource_name_fields=( + {featurestore.Featurestore._resource_noun: featurestore_id} + if featurestore_id + else featurestore_id + ), project=project, location=location, resource_id_validator=featurestore.EntityType._resource_id_validator, diff --git a/google/cloud/aiplatform/gapic/schema/__init__.py b/google/cloud/aiplatform/gapic/schema/__init__.py index 5d31a70f1f..3722e6ddd7 100644 --- a/google/cloud/aiplatform/gapic/schema/__init__.py +++ b/google/cloud/aiplatform/gapic/schema/__init__.py @@ -18,12 +18,20 @@ from google.cloud.aiplatform.v1.schema import predict from google.cloud.aiplatform.v1.schema import trainingjob from google.cloud.aiplatform.v1beta1.schema import predict as predict_v1beta1 -from google.cloud.aiplatform.v1beta1.schema import predict as trainingjob_v1beta1 +from google.cloud.aiplatform.v1beta1.schema import ( + predict as trainingjob_v1beta1, +) # import the v1 submodules for enhancement -from google.cloud.aiplatform.v1.schema.predict.instance_v1 import types as instance -from google.cloud.aiplatform.v1.schema.predict.params_v1 import types as params -from google.cloud.aiplatform.v1.schema.predict.prediction_v1 import types as prediction +from google.cloud.aiplatform.v1.schema.predict.instance_v1 import ( + types as instance, +) +from google.cloud.aiplatform.v1.schema.predict.params_v1 import ( + types as params, +) +from google.cloud.aiplatform.v1.schema.predict.prediction_v1 import ( + types as prediction, +) from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1 import ( types as definition, ) diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/hyperparameter_tuning.py b/google/cloud/aiplatform/hyperparameter_tuning.py index 871ae11790..b88f5a9488 100644 --- a/google/cloud/aiplatform/hyperparameter_tuning.py +++ b/google/cloud/aiplatform/hyperparameter_tuning.py @@ -140,7 +140,7 @@ def _to_parameter_spec( """Converts this parameter to ParameterSpec.""" conditions = [] if self.conditional_parameter_spec is not None: - for (conditional_param_id, spec) in self.conditional_parameter_spec.items(): + for conditional_param_id, spec in self.conditional_parameter_spec.items(): condition = ( gca_study_compat.StudySpec.ParameterSpec.ConditionalParameterSpec() ) @@ -178,7 +178,7 @@ def _to_parameter_spec_v1beta1( """Converts this parameter to ParameterSpec.""" conditions = [] if self.conditional_parameter_spec is not None: - for (conditional_param_id, spec) in self.conditional_parameter_spec.items(): + for conditional_param_id, spec in self.conditional_parameter_spec.items(): condition = ( gca_study_compat_v1beta1.StudySpec.ParameterSpec.ConditionalParameterSpec() ) diff --git a/google/cloud/aiplatform/initializer.py b/google/cloud/aiplatform/initializer.py index eb7747a43e..d9d360d1bb 100644 --- a/google/cloud/aiplatform/initializer.py +++ b/google/cloud/aiplatform/initializer.py @@ -94,7 +94,9 @@ def _set_project_as_env_var_or_google_auth_default(self): ) if project_number: if not self._credentials: - credentials, _ = google.auth.default() + credentials, _ = google.auth.default( + scopes=constants.DEFAULT_AUTHED_SCOPES + ) self._credentials = credentials # Try to convert project number to project ID which is more readable. 
try: @@ -114,7 +116,7 @@ def _set_project_as_env_var_or_google_auth_default(self): self._project = project if not self._credentials and not self._api_key: - credentials, _ = google.auth.default() + credentials, _ = google.auth.default(scopes=constants.DEFAULT_AUTHED_SCOPES) self._credentials = credentials def __init__(self): @@ -231,17 +233,22 @@ def init( f"{api_transport} is not a valid transport type. " + f"Valid transport types: {VALID_TRANSPORT_TYPES}" ) - else: # Raise error if api_transport other than rest is specified for usage with API key. + elif api_key and api_transport != "rest": + raise ValueError(f"{api_transport} is not supported with API keys. ") + else: if not project and not api_transport: api_transport = "rest" - elif not project and api_transport != "rest": - raise ValueError(f"{api_transport} is not supported with API keys. ") + if location: utils.validate_region(location) + # Set api_transport as "rest" if location is "global". + if location == "global" and not api_transport: + self._api_transport = "rest" if experiment_description and experiment is None: raise ValueError( - "Experiment needs to be set in `init` in order to add experiment descriptions." + "Experiment needs to be set in `init` in order to add experiment" + " descriptions." ) # reset metadata_service config if project or location is updated. @@ -464,8 +471,9 @@ def get_client_options( and not self._project and not self._location and not location_override - ): - # Default endpoint is location invariant if using API key + ) or (self._location == "global"): + # Default endpoint is location invariant if using API key or global + # location. api_endpoint = "aiplatform.googleapis.com" # If both project and API key are passed in, project takes precedence. 
@@ -665,6 +673,12 @@ def __getattr__(self, name: str): return _FunctionWrapperThatAddsDefaultMetadata(func) return result + def select_version(self, *args, **kwargs): + client = self._client.select_version(*args, **kwargs) + if global_config._request_metadata: + client = _ClientWrapperThatAddsDefaultMetadata(client) + return client + class _FunctionWrapperThatAddsDefaultMetadata: """A function wrapper that wraps a function/method to add default metadata.""" @@ -704,7 +718,7 @@ def _set_async_rest_credentials(credentials: AsyncCredentials): + "google-auth >= 2.35.0.\n\n" + "Install the following dependencies:\n" + "pip install google-api-core[grpc, async_rest] >= 2.21.0\n" - + "pip install google-auth[aiohttp] >= 2.35.0\n\n" + + "pip install google-auth[aiohttp] >= 2.47.0\n\n" + "Example usage:\n" + "from google.auth.aio.credentials import StaticCredentials\n" + "async_credentials = StaticCredentials(token=YOUR_TOKEN_HERE)\n" diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 9651200643..472c1661d5 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -45,6 +45,7 @@ model_deployment_monitoring_job as gca_model_deployment_monitoring_job_compat, job_state_v1beta1 as gca_job_state_v1beta1, model_monitoring_v1beta1 as gca_model_monitoring_v1beta1, + service_networking as gca_service_networking, ) # TODO(b/242108750): remove temporary logic once model monitoring for batch prediction is GA from google.cloud.aiplatform.constants import base as constants @@ -318,7 +319,7 @@ def cancel(self) -> None: getattr(self.api_client, self._cancel_method)(name=self.resource_name) -class BatchPredictionJob(_Job): +class BatchPredictionJob(_Job, base.PreviewMixin): _resource_noun = "batchPredictionJobs" _getter_method = "get_batch_prediction_job" @@ -328,6 +329,9 @@ class BatchPredictionJob(_Job): _job_type = "batch-predictions" _parse_resource_name_method = "parse_batch_prediction_job_path" _format_resource_name_method = 
"batch_prediction_job_path" + _preview_class = ( + "google.cloud.aiplatform.aiplatform.preview.jobs.BatchPredictionJob" + ) def __init__( self, @@ -948,6 +952,9 @@ def _submit_impl( ] = None, analysis_instance_schema_uri: Optional[str] = None, service_account: Optional[str] = None, + reservation_affinity_type: Optional[str] = None, + reservation_affinity_key: Optional[str] = None, + reservation_affinity_values: Optional[Sequence[str]] = None, wait_for_completion: bool = False, ) -> "BatchPredictionJob": """Create a batch prediction job. @@ -1135,6 +1142,18 @@ def _submit_impl( service_account (str): Optional. Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. + reservation_affinity_type (str): + Optional. The type of reservation affinity. + One of NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION, + SPECIFIC_THEN_ANY_RESERVATION, SPECIFIC_THEN_NO_RESERVATION + reservation_affinity_key (str): + Optional. Corresponds to the label key of a reservation resource. + To target a SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name` as the key + and specify the name of your reservation as its value. + reservation_affinity_values (List[str]): + Optional. Corresponds to the label values of a reservation resource. + This must be the full resource name of the reservation. + Format: 'projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}' wait_for_completion (bool): Whether to wait for the job completion. 
Returns: @@ -1267,6 +1286,13 @@ def _submit_impl( machine_spec.accelerator_type = accelerator_type machine_spec.accelerator_count = accelerator_count + if reservation_affinity_type: + machine_spec.reservation_affinity = utils.get_reservation_affinity( + reservation_affinity_type, + reservation_affinity_key, + reservation_affinity_values, + ) + dedicated_resources = gca_machine_resources_compat.BatchDedicatedResources() dedicated_resources.machine_spec = machine_spec @@ -1324,12 +1350,16 @@ def _submit_impl( model_monitoring_alert_config._config_for_bp = True gapic_mm_config = gca_model_monitoring_v1beta1.ModelMonitoringConfig( objective_configs=[model_monitoring_objective_config.as_proto()], - alert_config=model_monitoring_alert_config.as_proto() - if model_monitoring_alert_config is not None - else None, - analysis_instance_schema_uri=analysis_instance_schema_uri - if analysis_instance_schema_uri is not None - else None, + alert_config=( + model_monitoring_alert_config.as_proto() + if model_monitoring_alert_config is not None + else None + ), + analysis_instance_schema_uri=( + analysis_instance_schema_uri + if analysis_instance_schema_uri is not None + else None + ), ) gapic_batch_prediction_job.model_monitoring_config = gapic_mm_config @@ -2236,6 +2266,9 @@ def run( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> None: """Run this configured CustomJob. @@ -2310,6 +2343,9 @@ def run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 1 day. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. 
""" network = network or initializer.global_config.network service_account = service_account or initializer.global_config.service_account @@ -2329,6 +2365,7 @@ def run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) @base.optional_sync() @@ -2348,6 +2385,9 @@ def _run( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> None: """Helper method to ensure network synchronization and to run the configured CustomJob. @@ -2420,6 +2460,9 @@ def _run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 1 day. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. """ self.submit( service_account=service_account, @@ -2435,6 +2478,7 @@ def _run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) self._block_until_complete() @@ -2455,6 +2499,9 @@ def submit( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> None: """Submit the configured CustomJob. @@ -2524,6 +2571,9 @@ def submit( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 1 day. 
+ psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. Raises: ValueError: @@ -2546,6 +2596,9 @@ def submit( if network: self._gca_resource.job_spec.network = network + if psc_interface_config: + self._gca_resource.job_spec.psc_interface_config = psc_interface_config + if ( timeout or restart_job_on_worker_restart @@ -3273,7 +3326,7 @@ def _parse_configs( + "]. Note that deployed model IDs are different from the uploaded model's ID" ) raise ValueError(error_string) - for (deployed_model, objective_config) in objective_configs.items(): + for deployed_model, objective_config in objective_configs.items(): if ( deployed_model not in xai_enabled and objective_config.explanation_config is not None diff --git a/google/cloud/aiplatform/matching_engine/_protos/README b/google/cloud/aiplatform/matching_engine/_protos/README index 5a31eea525..ac82c72ada 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/README +++ b/google/cloud/aiplatform/matching_engine/_protos/README @@ -1,22 +1,19 @@ Steps to update the pb2 files before bazel generation is supported Track bazel progress here: https://github.com/googleapis/python-aiplatform/issues/3037 -1 . Ensure that your environment is using python 3.10 or older which is needed -for step 2. Consider using Anaconda to create envs with older python version. +1 . Use protobuf version that matches the other pb files in the repo, currently 4.25.3. -2. Use `pip install grpcio-tools==1.59.0` or newer to support protobuf 5.x - -3. Copy the file from +2. Copy the file from `https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto` into `google/rpc/status.proto`. Make sure that `google/rpc/status.proto` exists relative to the root of the python-aiplatform repo. -4. In the root directory python-aiplatform run +3. In the root directory python-aiplatform run ``` python -m grpc_tools.protoc --proto_path=. --python_out=. 
--grpc_python_out=. google/cloud/aiplatform/matching_engine/_protos/match_service.proto ``` -5. After running the command, change the import statements manually +4. After running the command, change the import statements manually ``` from google.cloud.aiplatform.matching_engine._protos import match_service_pb2 as google_dot_cloud_dot_aiplatform_dot_matching__engine_dot___protos_dot_match__service__pb2 ``` @@ -28,8 +25,8 @@ Also, run a find a replace to change `google_dot_cloud_dot_aiplatform_dot_matching__engine_dot___protos_dot_match__service__pb2` to `match_service_pb2` -6. Run `black` on both files to reformat the code +5. Run `black` on both files to reformat the code -7. Remove the status.proto added in step 3 +6. Remove the status.proto added in step 3 -8. Create a Pull Request in GitHub \ No newline at end of file +7. Create a Pull Request in GitHub \ No newline at end of file diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto index ab4ee104be..ad80af2f0d 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service.proto +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package google.cloud.aiplatform.container.v1; import "google/rpc/status.proto"; +import "google/protobuf/struct.proto"; // MatchService is a Google managed service for efficient vector similarity // search at scale. @@ -21,15 +22,14 @@ service MatchService { } // Feature embedding vector for sparse index. An array of numbers whose values - // are located in the specified dimensions. - message SparseEmbedding { +// are located in the specified dimensions. +message SparseEmbedding { + // The list of embedding values of the sparse vector. + repeated float float_val = 1; - // The list of embedding values of the sparse vector. 
- repeated float float_val = 1; - - // The list of indexes for the embedding values of the sparse vector. - repeated int64 dimension = 2; - } + // The list of indexes for the embedding values of the sparse vector. + repeated int64 dimension = 2; +} // Parameters for a match query. message MatchRequest { @@ -50,7 +50,6 @@ message MatchRequest { // Parameters for RRF algorithm that combines search results. message RRF { - // Users can provide an alpha value to give more weight to sparse vs dense. // For example, if the alpha is 0, we don't return dense at all, if it's 1, // we don't return sparse at all. @@ -70,7 +69,7 @@ message MatchRequest { // The list of restricts. repeated Namespace restricts = 4; - //The list of numeric restricts. + // The list of numeric restricts. repeated NumericNamespace numeric_restricts = 11; // Crowding is a constraint on a neighbor list produced by nearest neighbor @@ -128,6 +127,9 @@ message Embedding { // to return per crowding attribute value // (per_crowding_attribute_num_neighbors) is configured per-query. int64 crowding_attribute = 4; + + // The key-value map of additional metadata for the datapoint. + google.protobuf.Struct embedding_metadata = 7; } // Response of a match query. @@ -238,7 +240,6 @@ message Namespace { // eligible for each matching query, overall query is an AND across namespaces. // This uses numeric comparisons. message NumericNamespace { - // The string name of the namespace that this proto is specifying, // such as "size" or "cost". string name = 1; diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py index 77b44a8ce4..72250a6b07 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2.py @@ -16,22 +16,24 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/aiplatform/matching_engine/_protos/match_service.proto +# Protobuf Python Version: 4.25.3 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database - +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 + DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\nCgoogle/cloud/aiplatform/matching_engine/_protos/match_service.proto\x12$google.cloud.aiplatform.container.v1\x1a\x17google/rpc/status.proto"7\n\x0fSparseEmbedding\x12\x11\n\tfloat_val\x18\x01 \x03(\x02\x12\x11\n\tdimension\x18\x02 \x03(\x03"\xff\x04\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12O\n\x10sparse_embedding\x18\x0c \x01(\x0b\x32\x35.google.cloud.aiplatform.container.v1.SparseEmbedding\x12\x45\n\x03rrf\x18\r \x01(\x0b\x32\x36.google.cloud.aiplatform.container.v1.MatchRequest.RRFH\x00\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12\x42\n\trestricts\x18\x04 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x0b \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05\x12.\n&fraction_leaf_nodes_to_search_override\x18\t \x01(\x01\x12\x19\n\x11\x65mbedding_enabled\x18\x08 \x01(\x08\x12\x14\n\x0c\x65mbedding_id\x18\n \x01(\t\x1a\x14\n\x03RRF\x12\r\n\x05\x61lpha\x18\x01 
\x01(\x02\x42\t\n\x07ranking"\xae\x02\n\tEmbedding\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12O\n\x10sparse_embedding\x18\x06 \x01(\x0b\x32\x35.google.cloud.aiplatform.container.v1.SparseEmbedding\x12\x42\n\trestricts\x18\x03 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x05 \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12\x1a\n\x12\x63rowding_attribute\x18\x04 \x01(\x03"\x83\x02\n\rMatchResponse\x12N\n\x08neighbor\x18\x01 \x03(\x0b\x32<.google.cloud.aiplatform.container.v1.MatchResponse.Neighbor\x12\x43\n\nembeddings\x18\x02 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding\x1a]\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01\x12\x17\n\x0fsparse_distance\x18\x04 \x01(\x01\x12\x1a\n\x12\x63rowding_attribute\x18\x03 \x01(\x03"B\n\x19\x42\x61tchGetEmbeddingsRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x03(\t"a\n\x1a\x42\x61tchGetEmbeddingsResponse\x12\x43\n\nembeddings\x18\x01 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding"\x95\x02\n\x11\x42\x61tchMatchRequest\x12\x63\n\x08requests\x18\x01 \x03(\x0b\x32Q.google.cloud.aiplatform.container.v1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9a\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x44\n\x08requests\x18\x02 \x03(\x0b\x32\x32.google.cloud.aiplatform.container.v1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xa2\x02\n\x12\x42\x61tchMatchResponse\x12\x66\n\tresponses\x18\x01 \x03(\x0b\x32S.google.cloud.aiplatform.container.v1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa3\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x46\n\tresponses\x18\x02 \x03(\x0b\x32\x33.google.cloud.aiplatform.container.v1.MatchResponse\x12"\n\x06status\x18\x03 
\x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 \x03(\t"\xb4\x02\n\x10NumericNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\tvalue_int\x18\x02 \x01(\x03H\x00\x12\x15\n\x0bvalue_float\x18\x03 \x01(\x02H\x00\x12\x16\n\x0cvalue_double\x18\x04 \x01(\x01H\x00\x12K\n\x02op\x18\x05 \x01(\x0e\x32?.google.cloud.aiplatform.container.v1.NumericNamespace.Operator"x\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x08\n\x04LESS\x10\x01\x12\x0e\n\nLESS_EQUAL\x10\x02\x12\t\n\x05\x45QUAL\x10\x03\x12\x11\n\rGREATER_EQUAL\x10\x04\x12\x0b\n\x07GREATER\x10\x05\x12\r\n\tNOT_EQUAL\x10\x06\x42\x07\n\x05Value2\xa2\x03\n\x0cMatchService\x12r\n\x05Match\x12\x32.google.cloud.aiplatform.container.v1.MatchRequest\x1a\x33.google.cloud.aiplatform.container.v1.MatchResponse"\x00\x12\x81\x01\n\nBatchMatch\x12\x37.google.cloud.aiplatform.container.v1.BatchMatchRequest\x1a\x38.google.cloud.aiplatform.container.v1.BatchMatchResponse"\x00\x12\x99\x01\n\x12\x42\x61tchGetEmbeddings\x12?.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsRequest\x1a@.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsResponse"\x00\x62\x06proto3' + b'\nCgoogle/cloud/aiplatform/matching_engine/_protos/match_service.proto\x12$google.cloud.aiplatform.container.v1\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/protobuf/struct.proto"7\n\x0fSparseEmbedding\x12\x11\n\tfloat_val\x18\x01 \x03(\x02\x12\x11\n\tdimension\x18\x02 \x03(\x03"\xff\x04\n\x0cMatchRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12O\n\x10sparse_embedding\x18\x0c \x01(\x0b\x32\x35.google.cloud.aiplatform.container.v1.SparseEmbedding\x12\x45\n\x03rrf\x18\r \x01(\x0b\x32\x36.google.cloud.aiplatform.container.v1.MatchRequest.RRFH\x00\x12\x15\n\rnum_neighbors\x18\x03 \x01(\x05\x12\x42\n\trestricts\x18\x04 
\x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x0b \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12,\n$per_crowding_attribute_num_neighbors\x18\x05 \x01(\x05\x12\x1c\n\x14\x61pprox_num_neighbors\x18\x06 \x01(\x05\x12-\n%leaf_nodes_to_search_percent_override\x18\x07 \x01(\x05\x12.\n&fraction_leaf_nodes_to_search_override\x18\t \x01(\x01\x12\x19\n\x11\x65mbedding_enabled\x18\x08 \x01(\x08\x12\x14\n\x0c\x65mbedding_id\x18\n \x01(\t\x1a\x14\n\x03RRF\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x42\t\n\x07ranking"\xe3\x02\n\tEmbedding\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tfloat_val\x18\x02 \x03(\x02\x12O\n\x10sparse_embedding\x18\x06 \x01(\x0b\x32\x35.google.cloud.aiplatform.container.v1.SparseEmbedding\x12\x42\n\trestricts\x18\x03 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Namespace\x12Q\n\x11numeric_restricts\x18\x05 \x03(\x0b\x32\x36.google.cloud.aiplatform.container.v1.NumericNamespace\x12\x1a\n\x12\x63rowding_attribute\x18\x04 \x01(\x03\x12\x33\n\x12\x65mbedding_metadata\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct"\x83\x02\n\rMatchResponse\x12N\n\x08neighbor\x18\x01 \x03(\x0b\x32<.google.cloud.aiplatform.container.v1.MatchResponse.Neighbor\x12\x43\n\nembeddings\x18\x02 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding\x1a]\n\x08Neighbor\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x64istance\x18\x02 \x01(\x01\x12\x17\n\x0fsparse_distance\x18\x04 \x01(\x01\x12\x1a\n\x12\x63rowding_attribute\x18\x03 \x01(\x03"B\n\x19\x42\x61tchGetEmbeddingsRequest\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x03(\t"a\n\x1a\x42\x61tchGetEmbeddingsResponse\x12\x43\n\nembeddings\x18\x01 \x03(\x0b\x32/.google.cloud.aiplatform.container.v1.Embedding"\x95\x02\n\x11\x42\x61tchMatchRequest\x12\x63\n\x08requests\x18\x01 
\x03(\x0b\x32Q.google.cloud.aiplatform.container.v1.BatchMatchRequest.BatchMatchRequestPerIndex\x1a\x9a\x01\n\x19\x42\x61tchMatchRequestPerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x44\n\x08requests\x18\x02 \x03(\x0b\x32\x32.google.cloud.aiplatform.container.v1.MatchRequest\x12\x1c\n\x14low_level_batch_size\x18\x03 \x01(\x05"\xa2\x02\n\x12\x42\x61tchMatchResponse\x12\x66\n\tresponses\x18\x01 \x03(\x0b\x32S.google.cloud.aiplatform.container.v1.BatchMatchResponse.BatchMatchResponsePerIndex\x1a\xa3\x01\n\x1a\x42\x61tchMatchResponsePerIndex\x12\x19\n\x11\x64\x65ployed_index_id\x18\x01 \x01(\t\x12\x46\n\tresponses\x18\x02 \x03(\x0b\x32\x33.google.cloud.aiplatform.container.v1.MatchResponse\x12"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status"D\n\tNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61llow_tokens\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65ny_tokens\x18\x03 \x03(\t"\xb4\x02\n\x10NumericNamespace\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\tvalue_int\x18\x02 \x01(\x03H\x00\x12\x15\n\x0bvalue_float\x18\x03 \x01(\x02H\x00\x12\x16\n\x0cvalue_double\x18\x04 \x01(\x01H\x00\x12K\n\x02op\x18\x05 \x01(\x0e\x32?.google.cloud.aiplatform.container.v1.NumericNamespace.Operator"x\n\x08Operator\x12\x18\n\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x08\n\x04LESS\x10\x01\x12\x0e\n\nLESS_EQUAL\x10\x02\x12\t\n\x05\x45QUAL\x10\x03\x12\x11\n\rGREATER_EQUAL\x10\x04\x12\x0b\n\x07GREATER\x10\x05\x12\r\n\tNOT_EQUAL\x10\x06\x42\x07\n\x05Value2\xa2\x03\n\x0cMatchService\x12r\n\x05Match\x12\x32.google.cloud.aiplatform.container.v1.MatchRequest\x1a\x33.google.cloud.aiplatform.container.v1.MatchResponse"\x00\x12\x81\x01\n\nBatchMatch\x12\x37.google.cloud.aiplatform.container.v1.BatchMatchRequest\x1a\x38.google.cloud.aiplatform.container.v1.BatchMatchResponse"\x00\x12\x99\x01\n\x12\x42\x61tchGetEmbeddings\x12?.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsRequest\x1a@.google.cloud.aiplatform.container.v1.BatchGetEmbeddingsResponse"\x00\x62\x06proto3' ) 
_globals = globals() @@ -43,36 +45,36 @@ ) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals["_SPARSEEMBEDDING"]._serialized_start = 134 - _globals["_SPARSEEMBEDDING"]._serialized_end = 189 - _globals["_MATCHREQUEST"]._serialized_start = 192 - _globals["_MATCHREQUEST"]._serialized_end = 831 - _globals["_MATCHREQUEST_RRF"]._serialized_start = 800 - _globals["_MATCHREQUEST_RRF"]._serialized_end = 820 - _globals["_EMBEDDING"]._serialized_start = 834 - _globals["_EMBEDDING"]._serialized_end = 1136 - _globals["_MATCHRESPONSE"]._serialized_start = 1139 - _globals["_MATCHRESPONSE"]._serialized_end = 1398 - _globals["_MATCHRESPONSE_NEIGHBOR"]._serialized_start = 1305 - _globals["_MATCHRESPONSE_NEIGHBOR"]._serialized_end = 1398 - _globals["_BATCHGETEMBEDDINGSREQUEST"]._serialized_start = 1400 - _globals["_BATCHGETEMBEDDINGSREQUEST"]._serialized_end = 1466 - _globals["_BATCHGETEMBEDDINGSRESPONSE"]._serialized_start = 1468 - _globals["_BATCHGETEMBEDDINGSRESPONSE"]._serialized_end = 1565 - _globals["_BATCHMATCHREQUEST"]._serialized_start = 1568 - _globals["_BATCHMATCHREQUEST"]._serialized_end = 1845 - _globals["_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX"]._serialized_start = 1691 - _globals["_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX"]._serialized_end = 1845 - _globals["_BATCHMATCHRESPONSE"]._serialized_start = 1848 - _globals["_BATCHMATCHRESPONSE"]._serialized_end = 2138 - _globals["_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX"]._serialized_start = 1975 - _globals["_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX"]._serialized_end = 2138 - _globals["_NAMESPACE"]._serialized_start = 2140 - _globals["_NAMESPACE"]._serialized_end = 2208 - _globals["_NUMERICNAMESPACE"]._serialized_start = 2211 - _globals["_NUMERICNAMESPACE"]._serialized_end = 2519 - _globals["_NUMERICNAMESPACE_OPERATOR"]._serialized_start = 2390 - _globals["_NUMERICNAMESPACE_OPERATOR"]._serialized_end = 2510 - _globals["_MATCHSERVICE"]._serialized_start = 2522 - 
_globals["_MATCHSERVICE"]._serialized_end = 2940 + _globals["_SPARSEEMBEDDING"]._serialized_start = 164 + _globals["_SPARSEEMBEDDING"]._serialized_end = 219 + _globals["_MATCHREQUEST"]._serialized_start = 222 + _globals["_MATCHREQUEST"]._serialized_end = 861 + _globals["_MATCHREQUEST_RRF"]._serialized_start = 830 + _globals["_MATCHREQUEST_RRF"]._serialized_end = 850 + _globals["_EMBEDDING"]._serialized_start = 864 + _globals["_EMBEDDING"]._serialized_end = 1219 + _globals["_MATCHRESPONSE"]._serialized_start = 1222 + _globals["_MATCHRESPONSE"]._serialized_end = 1481 + _globals["_MATCHRESPONSE_NEIGHBOR"]._serialized_start = 1388 + _globals["_MATCHRESPONSE_NEIGHBOR"]._serialized_end = 1481 + _globals["_BATCHGETEMBEDDINGSREQUEST"]._serialized_start = 1483 + _globals["_BATCHGETEMBEDDINGSREQUEST"]._serialized_end = 1549 + _globals["_BATCHGETEMBEDDINGSRESPONSE"]._serialized_start = 1551 + _globals["_BATCHGETEMBEDDINGSRESPONSE"]._serialized_end = 1648 + _globals["_BATCHMATCHREQUEST"]._serialized_start = 1651 + _globals["_BATCHMATCHREQUEST"]._serialized_end = 1928 + _globals["_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX"]._serialized_start = 1774 + _globals["_BATCHMATCHREQUEST_BATCHMATCHREQUESTPERINDEX"]._serialized_end = 1928 + _globals["_BATCHMATCHRESPONSE"]._serialized_start = 1931 + _globals["_BATCHMATCHRESPONSE"]._serialized_end = 2221 + _globals["_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX"]._serialized_start = 2058 + _globals["_BATCHMATCHRESPONSE_BATCHMATCHRESPONSEPERINDEX"]._serialized_end = 2221 + _globals["_NAMESPACE"]._serialized_start = 2223 + _globals["_NAMESPACE"]._serialized_end = 2291 + _globals["_NUMERICNAMESPACE"]._serialized_start = 2294 + _globals["_NUMERICNAMESPACE"]._serialized_end = 2602 + _globals["_NUMERICNAMESPACE_OPERATOR"]._serialized_start = 2473 + _globals["_NUMERICNAMESPACE_OPERATOR"]._serialized_end = 2593 + _globals["_MATCHSERVICE"]._serialized_start = 2605 + _globals["_MATCHSERVICE"]._serialized_end = 3023 # 
@@protoc_insertion_point(module_scope) diff --git a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py index bfa4611fd1..24a9c7fffa 100644 --- a/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py +++ b/google/cloud/aiplatform/matching_engine/_protos/match_service_pb2_grpc.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +15,7 @@ # # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" + import grpc from google.cloud.aiplatform.matching_engine._protos import match_service_pb2 diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index.py b/google/cloud/aiplatform/matching_engine/matching_engine_index.py index a062407755..7922cba7e1 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index.py @@ -20,8 +20,10 @@ from google.auth import credentials as auth_credentials from google.protobuf import field_mask_pb2 from google.cloud.aiplatform import base +from google.cloud.aiplatform import compat from google.cloud.aiplatform.compat.types import ( index_service as gca_index_service, + index_service_v1beta1 as gca_index_service_v1beta1, matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, matching_engine_index as gca_matching_engine_index, encryption_spec as gca_encryption_spec, @@ -393,6 +395,58 @@ def update_embeddings( return self + def import_embeddings( + self, + config: gca_index_service_v1beta1.ImportIndexRequest.ConnectorConfig, + is_complete_overwrite: Optional[bool] = None, + request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + import_request_timeout: Optional[float] = None, + ) -> "MatchingEngineIndex": + """Imports 
embeddings from an external source, e.g., BigQuery. + + Args: + config (aiplatform.compat.types.index_service.ConnectorConfig): + Required. The configuration for importing data from an external source. + is_complete_overwrite (bool): + Optional. If true, completely replace existing index data. Must be + true for streaming update indexes. + request_metadata (Sequence[Tuple[str, str]]): + Optional. Strings which should be sent along with the request as metadata. + import_request_timeout (float): + Optional. The timeout for the request in seconds. + + Returns: + MatchingEngineIndex - The updated index resource object. + """ + self.wait() + + _LOGGER.log_action_start_against_resource( + "Importing embeddings", + "index", + self, + ) + + api_v1beta1_client = self.api_client.select_version(compat.V1BETA1) + import_lro = api_v1beta1_client.import_index( + request=gca_index_service_v1beta1.ImportIndexRequest( + name=self.resource_name, + config=config, + is_complete_overwrite=is_complete_overwrite, + ), + metadata=request_metadata, + timeout=import_request_timeout, + ) + + _LOGGER.log_action_started_against_resource_with_lro( + "Import", "index", self.__class__, import_lro + ) + + self._gca_resource = import_lro.result(timeout=None) + + _LOGGER.log_action_completed_against_resource("index", "Imported", self) + + return self + @property def deployed_indexes( self, @@ -534,6 +588,7 @@ def create_tree_ah_index( SHARD_SIZE_SMALL SHARD_SIZE_MEDIUM SHARD_SIZE_LARGE + SHARD_SIZE_SO_DYNAMIC Returns: @@ -541,10 +596,15 @@ def create_tree_ah_index( """ - algorithm_config = matching_engine_index_config.TreeAhConfig( - leaf_node_embedding_count=leaf_node_embedding_count, - leaf_nodes_to_search_percent=leaf_nodes_to_search_percent, - ) + algorithm_config = None + if ( + leaf_node_embedding_count is not None + or leaf_nodes_to_search_percent is not None + ): + algorithm_config = matching_engine_index_config.TreeAhConfig( + leaf_node_embedding_count=leaf_node_embedding_count, + 
leaf_nodes_to_search_percent=leaf_nodes_to_search_percent, + ) config = matching_engine_index_config.MatchingEngineIndexConfig( dimensions=dimensions, @@ -686,6 +746,7 @@ def create_brute_force_index( SHARD_SIZE_SMALL SHARD_SIZE_MEDIUM SHARD_SIZE_LARGE + SHARD_SIZE_SO_DYNAMIC Returns: MatchingEngineIndex - Index resource object @@ -736,6 +797,7 @@ def upsert_datapoints( Updatable fields: Use `all_restricts` to update both `restricts` and `numeric_restricts`. + Use `embedding_metadata` to update `embedding_metadata`. Returns: MatchingEngineIndex - Index resource object diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index_config.py b/google/cloud/aiplatform/matching_engine/matching_engine_index_config.py index e388b78a1f..f9140dd1d1 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index_config.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index_config.py @@ -120,7 +120,7 @@ class MatchingEngineIndexConfig: dimensions (int): Required. The number of dimensions of the input vectors. algorithm_config (AlgorithmConfig): - Required. The configuration with regard to the algorithms used for efficient search. + Optional. The configuration with regard to the algorithms used for efficient search. approximate_neighbors_count (int): Optional. The default number of neighbors to find via approximate search before exact reordering is performed. 
Exact reordering is a procedure where results returned by an @@ -139,7 +139,7 @@ class MatchingEngineIndexConfig: """ dimensions: int - algorithm_config: AlgorithmConfig + algorithm_config: Optional[AlgorithmConfig] = None approximate_neighbors_count: Optional[int] = None distance_measure_type: Optional[DistanceMeasureType] = None feature_norm_type: Optional[FeatureNormType] = None @@ -153,10 +153,13 @@ def as_dict(self) -> Dict[str, Any]: """ res = { "dimensions": self.dimensions, - "algorithmConfig": self.algorithm_config.as_dict(), "approximateNeighborsCount": self.approximate_neighbors_count, "distanceMeasureType": self.distance_measure_type, "featureNormType": self.feature_norm_type, "shardSize": self.shard_size, } + if self.algorithm_config: + res["algorithmConfig"] = self.algorithm_config.as_dict() + else: + res["algorithmConfig"] = None return res diff --git a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py index 6c51b07281..94da9ac225 100644 --- a/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py +++ b/google/cloud/aiplatform/matching_engine/matching_engine_index_endpoint.py @@ -16,7 +16,7 @@ # from dataclasses import dataclass, field -from typing import Dict, List, Optional, Sequence, Tuple, Union +from typing import Dict, List, Optional, Sequence, Tuple, Union, Any, Mapping from google.auth import credentials as auth_credentials from google.cloud.aiplatform import base @@ -151,7 +151,7 @@ def __post_init__(self): @dataclass class HybridQuery: """ - Hyrbid query. Could be used for dense-only or sparse-only or hybrid queries. + Hybrid query. Could be used for dense-only or sparse-only or hybrid queries. dense_embedding (List[float]): Optional. The dense part of the hybrid queries. 
@@ -208,6 +208,8 @@ class MatchNeighbor: For example, values [1,2,3] with dimensions [4,5,6] means value 1 is of the 4th dimension, value 2 is of the 4th dimension, and value 3 is of the 6th dimension. + embedding_metadata (Mapping[str, Any]): + Optional. The corresponding embedding metadata of the matching datapoint. """ @@ -220,6 +222,7 @@ class MatchNeighbor: numeric_restricts: Optional[List[NumericNamespace]] = None sparse_embedding_values: Optional[List[float]] = None sparse_embedding_dimensions: Optional[List[int]] = None + embedding_metadata: Optional[Mapping[str, Any]] = None def from_index_datapoint( self, index_datapoint: gca_index_v1beta1.IndexDatapoint @@ -276,6 +279,9 @@ def from_index_datapoint( self.sparse_embedding_dimensions = ( index_datapoint.sparse_embedding.dimensions ) + # retrieve embedding metadata + if index_datapoint.embedding_metadata is not None: + self.embedding_metadata = dict(index_datapoint.embedding_metadata) return self def from_embedding(self, embedding: match_service_pb2.Embedding) -> "MatchNeighbor": @@ -322,6 +328,10 @@ def from_embedding(self, embedding: match_service_pb2.Embedding) -> "MatchNeighb if embedding.sparse_embedding: self.sparse_embedding_values = embedding.sparse_embedding.float_val self.sparse_embedding_dimensions = embedding.sparse_embedding.dimension + + # retrieve embedding metadata + if embedding.embedding_metadata: + self.embedding_metadata = dict(embedding.embedding_metadata) return self @@ -702,10 +712,10 @@ def _get_psc_automated_deployed_index_ip_address( psc_network: Optional[str] = None, ) -> str: """Helper method to get the ip address for a psc automated endpoint. - Returns: + Args: deployed_index_id (str): - Optional. Required for private service access endpoint. - The user specified ID of the DeployedIndex. + Optional. Required for private service access endpoint. + The user specified ID of the DeployedIndex. deployed_index (gca_matching_engine_index_endpoint.DeployedIndex): Optional. 
Required for private service access endpoint. The DeployedIndex resource. @@ -722,6 +732,9 @@ def _get_psc_automated_deployed_index_ip_address( MatchingEngineIndexEndpoint instance, if the ip address is already known. + Returns: + str: The IP address for the PSC automated endpoint. + Raises: RuntimeError: No valid ip found for deployed index with id deployed_index_id within network psc_network, invalid PSC @@ -831,9 +844,9 @@ def _instantiate_private_match_service_stub( if ip_address not in self._match_grpc_stub_cache: # Set up channel and stub channel = grpc.insecure_channel("{}:10000".format(ip_address)) - self._match_grpc_stub_cache[ - ip_address - ] = match_service_pb2_grpc.MatchServiceStub(channel) + self._match_grpc_stub_cache[ip_address] = ( + match_service_pb2_grpc.MatchServiceStub(channel) + ) return self._match_grpc_stub_cache[ip_address] @property @@ -944,6 +957,7 @@ def _build_deployed_index( auth_config_audiences: Optional[Sequence[str]] = None, auth_config_allowed_issuers: Optional[Sequence[str]] = None, psc_automation_configs: Optional[Sequence[Tuple[str, str]]] = None, + deployment_tier: Optional[str] = None, ) -> gca_matching_engine_index_endpoint.DeployedIndex: """Builds a DeployedIndex. @@ -1046,6 +1060,8 @@ def _build_deployed_index( projects/{project}/global/networks/{network}, where {project} is a project number, as in '12345', and {network} is network name. + deployment_tier (str): + Optional. The deployment tier that the index is deployed to. 
""" @@ -1056,6 +1072,7 @@ def _build_deployed_index( enable_access_logging=enable_access_logging, reserved_ip_ranges=reserved_ip_ranges, deployment_group=deployment_group, + deployment_tier=deployment_tier, ) if auth_config_audiences and auth_config_allowed_issuers: @@ -1112,8 +1129,10 @@ def deploy_index( auth_config_audiences: Optional[Sequence[str]] = None, auth_config_allowed_issuers: Optional[Sequence[str]] = None, request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + sync: bool = True, deploy_request_timeout: Optional[float] = None, psc_automation_configs: Optional[Sequence[Tuple[str, str]]] = None, + deployment_tier: Optional[str] = None, ) -> "MatchingEngineIndexEndpoint": """Deploys an existing index resource to this endpoint resource. @@ -1207,10 +1226,171 @@ def deploy_index( auth_config_audiences and auth_config_allowed_issuers must be passed together. request_metadata (Sequence[Tuple[str, str]]): Optional. Strings which should be sent along with the request as metadata. - + sync (bool): + Whether to execute this method synchronously. If False, this method + will be executed in a concurrent Future and any downstream object will + be immediately returned and synced when the Future has completed. deploy_request_timeout (float): Optional. The timeout for the request in seconds. + psc_automation_configs (Sequence[Tuple[str, str]]): + Optional. A list of (project_id, network) pairs for Private + Service Connection endpoints to be setup for the deployed index. + The project_id is the project number of the project that the + network is in, and network is the name of the network. + Network is the full name of the Google Compute Engine + `network `__ + to which the index should be deployed to. + Format:{projects/{project}/global/networks/{network}. Where + {project} is a project number, as in '12345', and {network} + is network name. 
+ + For example: + [(project_id_1, network_1), (project_id_1, network_2)] will enable + PSC automation for the index to be deployed to project_id_1's network_1 + and network_2 and can be queried within these networks. + deployment_tier (str): + Optional. The deployment tier that the index is deployed to. + Returns: + MatchingEngineIndexEndpoint - IndexEndpoint resource object + """ + return self._deploy_index( + index=index, + deployed_index_id=deployed_index_id, + display_name=display_name, + machine_type=machine_type, + min_replica_count=min_replica_count, + max_replica_count=max_replica_count, + enable_access_logging=enable_access_logging, + reserved_ip_ranges=reserved_ip_ranges, + deployment_group=deployment_group, + auth_config_audiences=auth_config_audiences, + auth_config_allowed_issuers=auth_config_allowed_issuers, + request_metadata=request_metadata, + sync=sync, + deploy_request_timeout=deploy_request_timeout, + psc_automation_configs=psc_automation_configs, + deployment_tier=deployment_tier, + ) + + @base.optional_sync(return_input_arg="self") + def _deploy_index( + self, + index: matching_engine.MatchingEngineIndex, + deployed_index_id: str, + display_name: Optional[str] = None, + machine_type: Optional[str] = None, + min_replica_count: Optional[int] = None, + max_replica_count: Optional[int] = None, + enable_access_logging: Optional[bool] = None, + reserved_ip_ranges: Optional[Sequence[str]] = None, + deployment_group: Optional[str] = None, + auth_config_audiences: Optional[Sequence[str]] = None, + auth_config_allowed_issuers: Optional[Sequence[str]] = None, + request_metadata: Optional[Sequence[Tuple[str, str]]] = (), + sync: bool = True, + deploy_request_timeout: Optional[float] = None, + psc_automation_configs: Optional[Sequence[Tuple[str, str]]] = None, + deployment_tier: Optional[str] = None, + ) -> "MatchingEngineIndexEndpoint": + """Helper method to deploy an existing index resource to this endpoint resource.
+ + Args: + index (MatchingEngineIndex): + Required. The Index this is the + deployment of. We may refer to this Index as the + DeployedIndex's "original" Index. + deployed_index_id (str): + Required. The user specified ID of the + DeployedIndex. The ID can be up to 128 + characters long and must start with a letter and + only contain letters, numbers, and underscores. + The ID must be unique within the project it is + created in. + display_name (str): + The display name of the DeployedIndex. If not provided upon + creation, the Index's display_name is used. + machine_type (str): + Optional. The type of machine. Not specifying machine type will + result in model to be deployed with automatic resources. + min_replica_count (int): + Optional. The minimum number of machine replicas this deployed + model will be always deployed on. If traffic against it increases, + it may dynamically be deployed onto more replicas, and as traffic + decreases, some of these extra replicas may be freed. + + If this value is not provided, the value of 2 will be used. + max_replica_count (int): + Optional. The maximum number of replicas this deployed model may + be deployed on when the traffic against it increases. If requested + value is too large, the deployment will error, but if deployment + succeeds then the ability to scale the model to that many replicas + is guaranteed (barring service outages). If traffic against the + deployed model increases beyond what its replicas at maximum may + handle, a portion of the traffic will be dropped. If this value + is not provided, the larger value of min_replica_count or 2 will + be used. If value provided is smaller than min_replica_count, it + will automatically be increased to be min_replica_count. + enable_access_logging (bool): + Optional. If true, private endpoint's access + logs are sent to StackDriver Logging. + These logs are like standard server access logs, + containing information like timestamp and + latency for each MatchRequest. 
+ Note that Stackdriver logs may incur a cost, + especially if the deployed index receives a high + queries per second rate (QPS). Estimate your + costs before enabling this option. + reserved_ip_ranges (Sequence[str]): + Optional. A list of reserved ip ranges under + the VPC network that can be used for this + DeployedIndex. + If set, we will deploy the index within the + provided ip ranges. Otherwise, the index might + be deployed to any ip ranges under the provided + VPC network. + + The value should be the name of the address + (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) + Example: 'vertex-ai-ip-range'. + deployment_group (str): + Optional. The deployment group can be no longer than 64 + characters (eg: 'test', 'prod'). If not set, we will use the + 'default' deployment group. + + Creating ``deployment_groups`` with ``reserved_ip_ranges`` + is a recommended practice when the peered network has + multiple peering ranges. This creates your deployments from + predictable IP spaces for easier traffic administration. + Also, one deployment_group (except 'default') can only be + used with the same reserved_ip_ranges which means if the + deployment_group has been used with reserved_ip_ranges: [a, + b, c], using it with [a, b] or [d, e] is disallowed. + + Note: we only support up to 5 deployment groups (not + including 'default'). + auth_config_audiences (Sequence[str]): + The list of JWT + `audiences `__. + that are allowed to access. A JWT containing any of these + audiences will be accepted. + + auth_config_audiences and auth_config_allowed_issuers must be passed together. + auth_config_allowed_issuers (Sequence[str]): + A list of allowed JWT issuers. Each entry must be a valid + Google service account, in the following format: + + ``service-account-name@project-id.iam.gserviceaccount.com`` + + auth_config_audiences and auth_config_allowed_issuers must be passed together. + request_metadata (Sequence[Tuple[str, str]]): + Optional.
Strings which should be sent along with the request as metadata. + sync (bool): + Whether to execute this method synchronously. If False, this method + will be executed in a concurrent Future and any downstream object will + be immediately returned and synced when the Future has completed. + deploy_request_timeout (float): + Optional. The timeout for the request in seconds. psc_automation_configs (Sequence[Tuple[str, str]]): Optional. A list of (project_id, network) pairs for Private Service Connection endpoints to be setup for the deployed index. @@ -1228,6 +1408,8 @@ def deploy_index( [(project_id_1, network_1), (project_id_1, network_2))] will enable PSC automation for the index to be deployed to project_id_1's network_1 and network_2 and can be queried within these networks. + deployment_tier (str): + Optional. The deployment tier that the index is deployed to. Returns: MatchingEngineIndexEndpoint - IndexEndpoint resource object """ @@ -1253,6 +1435,7 @@ def deploy_index( auth_config_audiences=auth_config_audiences, auth_config_allowed_issuers=auth_config_allowed_issuers, psc_automation_configs=psc_automation_configs, + deployment_tier=deployment_tier, ) deploy_lro = self.api_client.deploy_index( @@ -1704,10 +1887,10 @@ def find_neighbors( [ MatchNeighbor( id=neighbor.datapoint.datapoint_id, - distance=neighbor.distance, - sparse_distance=neighbor.sparse_distance - if neighbor.sparse_distance - else None, + distance=neighbor.distance if neighbor.distance else None, + sparse_distance=( + neighbor.sparse_distance if neighbor.sparse_distance else None + ), ).from_index_datapoint(index_datapoint=neighbor.datapoint) for neighbor in embedding_neighbors.neighbors ] @@ -2005,17 +2188,21 @@ def match( approx_num_neighbors=approx_num_neighbors, fraction_leaf_nodes_to_search_override=fraction_leaf_nodes_to_search_override, numeric_restricts=numeric_restricts, - sparse_embedding=match_service_pb2.SparseEmbedding( - float_val=query.sparse_embedding_values, - 
dimension=query.sparse_embedding_dimensions, - ) - if query_is_hybrid - else None, - rrf=match_service_pb2.MatchRequest.RRF( - alpha=query.rrf_ranking_alpha, - ) - if query_is_hybrid and query.rrf_ranking_alpha - else None, + sparse_embedding=( + match_service_pb2.SparseEmbedding( + float_val=query.sparse_embedding_values, + dimension=query.sparse_embedding_dimensions, + ) + if query_is_hybrid + else None + ), + rrf=( + match_service_pb2.MatchRequest.RRF( + alpha=query.rrf_ranking_alpha, + ) + if query_is_hybrid and query.rrf_ranking_alpha + else None + ), ) requests.append(request) else: @@ -2036,19 +2223,18 @@ def match( # Wrap the results in MatchNeighbor objects and return match_neighbors_response = [] for resp in response.responses[0].responses: - match_neighbors_id_map = {} + embedding_map = {embedding.id: embedding for embedding in resp.embeddings} + neighbors_list = [] for neighbor in resp.neighbor: - match_neighbors_id_map[neighbor.id] = MatchNeighbor( + match_neighbor = MatchNeighbor( id=neighbor.id, - distance=neighbor.distance, - sparse_distance=neighbor.sparse_distance - if neighbor.sparse_distance - else None, + distance=neighbor.distance if neighbor.distance else None, + sparse_distance=( + neighbor.sparse_distance if neighbor.sparse_distance else None + ), ) - for embedding in resp.embeddings: - if embedding.id in match_neighbors_id_map: - match_neighbors_id_map[embedding.id] = match_neighbors_id_map[ - embedding.id - ].from_embedding(embedding=embedding) - match_neighbors_response.append(list(match_neighbors_id_map.values())) + if neighbor.id in embedding_map: + match_neighbor.from_embedding(embedding=embedding_map[neighbor.id]) + neighbors_list.append(match_neighbor) + match_neighbors_response.append(neighbors_list) return match_neighbors_response diff --git a/google/cloud/aiplatform/metadata/_models.py b/google/cloud/aiplatform/metadata/_models.py index 0eb38e0ddb..583433d28b 100644 --- a/google/cloud/aiplatform/metadata/_models.py +++ 
b/google/cloud/aiplatform/metadata/_models.py @@ -22,7 +22,6 @@ from typing import Any, Dict, Optional, Sequence, Union from google.auth import credentials as auth_credentials -from google.cloud import storage from google.cloud import aiplatform from google.cloud.aiplatform import base from google.cloud.aiplatform import explain @@ -148,6 +147,12 @@ def _load_sklearn_model( f"You are using sklearn {sklearn.__version__}." "Attempting to load model..." ) + + _LOGGER.warning( + "Loading a scikit-learn model via pickle is insecure. " + "Ensure the model artifact is from a trusted source.", + ) + with open(model_file, "rb") as f: sk_model = pickle.load(f) @@ -371,6 +376,7 @@ def save_model( project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, + staging_bucket: Optional[str] = None, ) -> google_artifact_schema.ExperimentModel: """Saves a ML model into a MLMD artifact. @@ -418,12 +424,18 @@ def save_model( credentials (auth_credentials.Credentials): Optional. Custom credentials used to create this Artifact. Overrides credentials set in aiplatform.init. + staging_bucket (str): + Optional. The staging bucket used to save the model. If not provided, + the staging bucket set in aiplatform.init will be used. A staging + bucket or uri is required for saving a model. Returns: An ExperimentModel instance. Raises: ValueError: if model type is not supported. + RuntimeError: If staging bucket was not set using aiplatform.init + and a staging bucket or uri was not passed in. 
""" framework_name = framework_version = "" try: @@ -438,6 +450,16 @@ def save_model( ) and model.__class__.__module__.startswith("sklearn"): framework_name = "sklearn" framework_version = sklearn.__version__ + try: + import sklearn.v1_0_2 + except ImportError: + pass + else: + if isinstance( + model, sklearn.v1_0_2.base.BaseEstimator + ) and model.__class__.__module__.startswith("sklearn"): + framework_name = "sklearn" + framework_version = sklearn.v1_0_2.__version__ try: import xgboost as xgb @@ -466,24 +488,13 @@ def save_model( model_file = _FRAMEWORK_SPECS[framework_name]["model_file"] if not uri: - staging_bucket = initializer.global_config.staging_bucket - # TODO(b/264196887) + staging_bucket = staging_bucket or initializer.global_config.staging_bucket + if not staging_bucket: - project = project or initializer.global_config.project - location = location or initializer.global_config.location - credentials = credentials or initializer.global_config.credentials - - staging_bucket_name = project + "-vertex-staging-" + location - client = storage.Client(project=project, credentials=credentials) - staging_bucket = storage.Bucket(client=client, name=staging_bucket_name) - if not staging_bucket.exists(): - _LOGGER.info(f'Creating staging bucket "{staging_bucket_name}"') - staging_bucket = client.create_bucket( - bucket_or_name=staging_bucket, - project=project, - location=location, - ) - staging_bucket = f"gs://{staging_bucket_name}" + raise RuntimeError( + "staging_bucket should be passed to save_model constructor or " + "should be set using aiplatform.init(staging_bucket='gs://my-bucket')" + ) unique_name = utils.timestamped_unique_name() uri = f"{staging_bucket}/{unique_name}-{framework_name}-model" @@ -528,7 +539,7 @@ def save_model( def load_model( - model: Union[str, google_artifact_schema.ExperimentModel] + model: Union[str, google_artifact_schema.ExperimentModel], ) -> Union["sklearn.base.BaseEstimator", "xgb.Booster", "tf.Module"]: # noqa: F821 
"""Retrieves the original ML model from an ExperimentModel resource. @@ -843,7 +854,7 @@ def register_model( def get_experiment_model_info( - model: Union[str, google_artifact_schema.ExperimentModel] + model: Union[str, google_artifact_schema.ExperimentModel], ) -> Dict[str, Any]: """Get the model's info from an experiment model artifact. diff --git a/google/cloud/aiplatform/metadata/execution.py b/google/cloud/aiplatform/metadata/execution.py index 27ef4d96b9..78846646c9 100644 --- a/google/cloud/aiplatform/metadata/execution.py +++ b/google/cloud/aiplatform/metadata/execution.py @@ -324,9 +324,9 @@ def _add_artifact( events = [ gca_event.Event( artifact=artifact_resource_name, - type_=gca_event.Event.Type.INPUT - if input - else gca_event.Event.Type.OUTPUT, + type_=( + gca_event.Event.Type.INPUT if input else gca_event.Event.Type.OUTPUT + ), ) for artifact_resource_name in artifact_resource_names ] diff --git a/google/cloud/aiplatform/metadata/experiment_resources.py b/google/cloud/aiplatform/metadata/experiment_resources.py index 0d1561eac3..c9490361a0 100644 --- a/google/cloud/aiplatform/metadata/experiment_resources.py +++ b/google/cloud/aiplatform/metadata/experiment_resources.py @@ -316,6 +316,7 @@ def get_or_create( def list( cls, *, + filter: Optional[str] = None, project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, @@ -327,6 +328,8 @@ def list( ``` Args: + filter (str): + Optional. A query to filter available resources for matching results. project (str): Optional. Project to list these experiments from. Overrides project set in aiplatform.init. 
@@ -343,6 +346,8 @@ def list( filter_str = metadata_utils._make_filter_string( schema_title=constants.SYSTEM_EXPERIMENT ) + if filter: + filter_str = f"{filter_str} AND ({filter})" with _SetLoggerLevel(resource): experiment_contexts = context.Context.list( diff --git a/google/cloud/aiplatform/metadata/experiment_run_resource.py b/google/cloud/aiplatform/metadata/experiment_run_resource.py index 95a3d616b6..8da82c2f52 100644 --- a/google/cloud/aiplatform/metadata/experiment_run_resource.py +++ b/google/cloud/aiplatform/metadata/experiment_run_resource.py @@ -1196,6 +1196,7 @@ def log_model( project: Optional[str] = None, location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, + staging_bucket: Optional[str] = None, ) -> google_artifact_schema.ExperimentModel: """Saves a ML model into a MLMD artifact and log it to this ExperimentRun. @@ -1245,12 +1246,18 @@ def log_model( credentials (auth_credentials.Credentials): Optional. Custom credentials used to create this Artifact. Overrides credentials set in aiplatform.init. + staging_bucket (str): + Optional. The staging bucket used to save the model. If not provided, + the staging bucket set in aiplatform.init will be used. A staging + bucket or uri is required for saving a model. Returns: An ExperimentModel instance. Raises: ValueError: if model type is not supported. + RuntimeError: If staging bucket was not set using aiplatform.init + and a staging bucket or uri was not passed in. 
""" experiment_model = _models.save_model( model=model, @@ -1262,6 +1269,7 @@ def log_model( project=project, location=location, credentials=credentials, + staging_bucket=staging_bucket, ) self._metadata_node.add_artifacts_and_executions( diff --git a/google/cloud/aiplatform/metadata/metadata.py b/google/cloud/aiplatform/metadata/metadata.py index facb4f698c..ebe1407776 100644 --- a/google/cloud/aiplatform/metadata/metadata.py +++ b/google/cloud/aiplatform/metadata/metadata.py @@ -303,6 +303,7 @@ def set_experiment( ] = None, project: Optional[str] = None, location: Optional[str] = None, + display_button: bool = True, ): """Set the experiment. Will retrieve the Experiment if it exists or create one with the provided name. @@ -326,6 +327,8 @@ def set_experiment( location (str): Optional. Location where this experiment will be retrieved from or created. Overrides location set in aiplatform.init. + display_button (bool): + Optional. If set to `True`, displays a button to the experiment in the IPython notebook. 
""" self.reset() @@ -350,7 +353,8 @@ def set_experiment( if not current_backing_tb and backing_tb: experiment.assign_backing_tensorboard(tensorboard=backing_tb) - _ipython_utils.display_experiment_button(experiment) + if display_button: + _ipython_utils.display_experiment_button(experiment) self._experiment = experiment diff --git a/google/cloud/aiplatform/metadata/metadata_store.py b/google/cloud/aiplatform/metadata/metadata_store.py index ab2e7ec305..e9ea6f421d 100644 --- a/google/cloud/aiplatform/metadata/metadata_store.py +++ b/google/cloud/aiplatform/metadata/metadata_store.py @@ -24,7 +24,9 @@ from google.cloud.aiplatform import base, initializer from google.cloud.aiplatform import compat from google.cloud.aiplatform import utils -from google.cloud.aiplatform.compat.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform.compat.types import ( + metadata_store as gca_metadata_store, +) from google.cloud.aiplatform.constants import base as base_constants diff --git a/google/cloud/aiplatform/metadata/schema/base_artifact.py b/google/cloud/aiplatform/metadata/schema/base_artifact.py index 81225b72fa..316653b511 100644 --- a/google/cloud/aiplatform/metadata/schema/base_artifact.py +++ b/google/cloud/aiplatform/metadata/schema/base_artifact.py @@ -47,7 +47,6 @@ def __init__( metadata: Optional[Dict] = None, state: Optional[gca_artifact.Artifact.State] = gca_artifact.Artifact.State.LIVE, ): - """Initializes the Artifact with the given name, URI and metadata. This is the base class for defining various artifact types, which can be @@ -112,7 +111,6 @@ def _init_with_resource_name( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, ): - """Initializes the Artifact instance using an existing resource. 
Args: diff --git a/google/cloud/aiplatform/metadata/schema/base_context.py b/google/cloud/aiplatform/metadata/schema/base_context.py index e9b331095a..d22e3ec2b6 100644 --- a/google/cloud/aiplatform/metadata/schema/base_context.py +++ b/google/cloud/aiplatform/metadata/schema/base_context.py @@ -49,7 +49,6 @@ def __init__( metadata: Optional[Dict] = None, description: Optional[str] = None, ): - """Initializes the Context with the given name, URI and metadata. Args: diff --git a/google/cloud/aiplatform/metadata/schema/base_execution.py b/google/cloud/aiplatform/metadata/schema/base_execution.py index 6076ea204c..c514077c65 100644 --- a/google/cloud/aiplatform/metadata/schema/base_execution.py +++ b/google/cloud/aiplatform/metadata/schema/base_execution.py @@ -52,7 +52,6 @@ def __init__( metadata: Optional[Dict] = None, description: Optional[str] = None, ): - """Initializes the Execution with the given name, URI and metadata. Args: @@ -97,7 +96,6 @@ def _init_with_resource_name( *, execution_name: str, ): - """Initializes the Execution instance using an existing resource. 
Args: execution_name (str): diff --git a/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py b/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py index 4b8b9e0ed0..d54f2adc73 100644 --- a/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py +++ b/google/cloud/aiplatform/metadata/schema/google/artifact_schema.py @@ -463,9 +463,9 @@ def __init__( if mean_absolute_error: extended_metadata["meanAbsoluteError"] = mean_absolute_error if mean_absolute_percentage_error: - extended_metadata[ - "meanAbsolutePercentageError" - ] = mean_absolute_percentage_error + extended_metadata["meanAbsolutePercentageError"] = ( + mean_absolute_percentage_error + ) if r_squared: extended_metadata["rSquared"] = r_squared if root_mean_squared_log_error: @@ -557,25 +557,25 @@ def __init__( if mean_absolute_error: extended_metadata["meanAbsoluteError"] = mean_absolute_error if mean_absolute_percentage_error: - extended_metadata[ - "meanAbsolutePercentageError" - ] = mean_absolute_percentage_error + extended_metadata["meanAbsolutePercentageError"] = ( + mean_absolute_percentage_error + ) if r_squared: extended_metadata["rSquared"] = r_squared if root_mean_squared_log_error: extended_metadata["rootMeanSquaredLogError"] = root_mean_squared_log_error if weighted_absolute_percentage_error: - extended_metadata[ - "weightedAbsolutePercentageError" - ] = weighted_absolute_percentage_error + extended_metadata["weightedAbsolutePercentageError"] = ( + weighted_absolute_percentage_error + ) if root_mean_squared_percentage_error: - extended_metadata[ - "rootMeanSquaredPercentageError" - ] = root_mean_squared_percentage_error + extended_metadata["rootMeanSquaredPercentageError"] = ( + root_mean_squared_percentage_error + ) if symmetric_mean_absolute_percentage_error: - extended_metadata[ - "symmetricMeanAbsolutePercentageError" - ] = symmetric_mean_absolute_percentage_error + extended_metadata["symmetricMeanAbsolutePercentageError"] = ( + 
symmetric_mean_absolute_percentage_error + ) super(ForecastingMetrics, self).__init__( uri=uri, diff --git a/google/cloud/aiplatform/model_evaluation/model_evaluation.py b/google/cloud/aiplatform/model_evaluation/model_evaluation.py index e574ecad78..ae8ee06ab7 100644 --- a/google/cloud/aiplatform/model_evaluation/model_evaluation.py +++ b/google/cloud/aiplatform/model_evaluation/model_evaluation.py @@ -107,9 +107,9 @@ def __init__( self._gca_resource = self._get_gca_resource( resource_name=evaluation_name, - parent_resource_name_fields={models.Model._resource_noun: model_id} - if model_id - else model_id, + parent_resource_name_fields=( + {models.Model._resource_noun: model_id} if model_id else model_id + ), ) def delete(self): diff --git a/google/cloud/aiplatform/model_evaluation/model_evaluation_job.py b/google/cloud/aiplatform/model_evaluation/model_evaluation_job.py index 87f4e44b1f..7ce555d3db 100644 --- a/google/cloud/aiplatform/model_evaluation/model_evaluation_job.py +++ b/google/cloud/aiplatform/model_evaluation/model_evaluation_job.py @@ -320,9 +320,9 @@ def submit( if bigquery_source_uri: template_params["batch_predict_predictions_format"] = "bigquery" template_params["batch_predict_bigquery_source_uri"] = bigquery_source_uri - template_params[ - "batch_predict_bigquery_destination_output_uri" - ] = batch_predict_bigquery_destination_output_uri + template_params["batch_predict_bigquery_destination_output_uri"] = ( + batch_predict_bigquery_destination_output_uri + ) elif gcs_source_uris: template_params["batch_predict_gcs_source_uris"] = gcs_source_uris @@ -330,14 +330,14 @@ def submit( template_params["evaluation_class_labels"] = class_labels if prediction_label_column: - template_params[ - "evaluation_prediction_label_column" - ] = prediction_label_column + template_params["evaluation_prediction_label_column"] = ( + prediction_label_column + ) if prediction_score_column: - template_params[ - "evaluation_prediction_score_column" - ] = 
prediction_score_column + template_params["evaluation_prediction_score_column"] = ( + prediction_score_column + ) # If the user provides a SA, use it for the Dataflow job as well if service_account is not None: diff --git a/google/cloud/aiplatform/model_monitoring/objective.py b/google/cloud/aiplatform/model_monitoring/objective.py index 48e45d734d..cf90b6cba3 100644 --- a/google/cloud/aiplatform/model_monitoring/objective.py +++ b/google/cloud/aiplatform/model_monitoring/objective.py @@ -92,7 +92,9 @@ def __init__( def as_proto( self, - ) -> gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig: + ) -> ( + gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig + ): """Converts _SkewDetectionConfig to a proto message. Returns: @@ -117,9 +119,9 @@ def as_proto( attribution_score_skew_threshold = gca_model_monitoring.ThresholdConfig( value=self.attribute_skew_thresholds[key] ) - attribution_score_skew_thresholds_mapping[ - key - ] = attribution_score_skew_threshold + attribution_score_skew_thresholds_mapping[key] = ( + attribution_score_skew_threshold + ) return gca_model_monitoring.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig( skew_thresholds=skew_thresholds_mapping, attribution_score_skew_thresholds=attribution_score_skew_thresholds_mapping, @@ -153,7 +155,9 @@ def __init__( def as_proto( self, - ) -> gca_model_monitoring.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig: + ) -> ( + gca_model_monitoring.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig + ): """Converts _DriftDetectionConfig to a proto message. 
Returns: @@ -174,9 +178,9 @@ def as_proto( value=self.attribute_drift_thresholds[key] ) ) - attribution_score_drift_thresholds_mapping[ - key - ] = attribution_score_drift_threshold + attribution_score_drift_thresholds_mapping[key] = ( + attribution_score_drift_threshold + ) return gca_model_monitoring.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig( drift_thresholds=drift_thresholds_mapping, attribution_score_drift_thresholds=attribution_score_drift_thresholds_mapping, @@ -264,15 +268,21 @@ def as_proto(self) -> gca_model_monitoring.ModelMonitoringObjectiveConfig: # TODO(b/242108750): remove temporary logic once model monitoring for batch prediction is GA gapic_config = gca_model_monitoring.ModelMonitoringObjectiveConfig( training_dataset=training_dataset, - training_prediction_skew_detection_config=self.skew_detection_config.as_proto() - if self.skew_detection_config is not None - else None, - prediction_drift_detection_config=self.drift_detection_config.as_proto() - if self.drift_detection_config is not None - else None, - explanation_config=self.explanation_config.as_proto() - if self.explanation_config is not None - else None, + training_prediction_skew_detection_config=( + self.skew_detection_config.as_proto() + if self.skew_detection_config is not None + else None + ), + prediction_drift_detection_config=( + self.drift_detection_config.as_proto() + if self.drift_detection_config is not None + else None + ), + explanation_config=( + self.explanation_config.as_proto() + if self.explanation_config is not None + else None + ), ) if self._config_for_bp: return ( diff --git a/google/cloud/aiplatform/models.py b/google/cloud/aiplatform/models.py index e3773712b9..b12c045548 100644 --- a/google/cloud/aiplatform/models.py +++ b/google/cloud/aiplatform/models.py @@ -62,9 +62,11 @@ deployment_resource_pool as gca_deployment_resource_pool_compat, deployed_model_ref as gca_deployed_model_ref_compat, encryption_spec as gca_encryption_spec, + endpoint_v1beta1 
as gca_endpoint_v1beta1_compat, endpoint as gca_endpoint_compat, explanation as gca_explanation_compat, io as gca_io_compat, + machine_resources_v1beta1 as gca_machine_resources_v1beta1_compat, machine_resources as gca_machine_resources_compat, model as gca_model_compat, model_service as gca_model_service_compat, @@ -251,6 +253,7 @@ def create( reservation_affinity_key: Optional[str] = None, reservation_affinity_values: Optional[List[str]] = None, spot: bool = False, + required_replica_count: Optional[int] = 0, ) -> "DeploymentResourcePool": """Creates a new DeploymentResourcePool. @@ -323,6 +326,14 @@ def create( Format: 'projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}' spot (bool): Optional. Whether to schedule the deployment workload on spot VMs. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. Returns: DeploymentResourcePool @@ -353,6 +364,7 @@ def create( spot=spot, sync=sync, create_request_timeout=create_request_timeout, + required_replica_count=required_replica_count, ) @classmethod @@ -378,6 +390,7 @@ def _create( spot: bool = False, sync=True, create_request_timeout: Optional[float] = None, + required_replica_count: Optional[int] = 0, ) -> "DeploymentResourcePool": """Creates a new DeploymentResourcePool. @@ -453,6 +466,14 @@ def _create( when the Future has completed. create_request_timeout (float): Optional. The create request timeout in seconds. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. 
This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. Returns: DeploymentResourcePool @@ -466,6 +487,7 @@ def _create( min_replica_count=min_replica_count, max_replica_count=max_replica_count, spot=spot, + required_replica_count=required_replica_count, ) machine_spec = gca_machine_resources_compat.MachineSpec( @@ -674,8 +696,6 @@ def __init__( self._gca_resource = gca_endpoint_compat.Endpoint(name=endpoint_name) self.authorized_session = None - self.raw_predict_request_url = None - self.stream_raw_predict_request_url = None @property def _prediction_client(self) -> utils.PredictionClientWithOverride: @@ -763,7 +783,37 @@ def private_service_connect_config( ) -> Optional[gca_service_networking.PrivateServiceConnectConfig]: """The Private Service Connect configuration for this Endpoint.""" self._assert_gca_resource_is_available() - return self._gca_resource.private_service_connect_config + return getattr(self._gca_resource, "private_service_connect_config", None) + + @property + def dedicated_endpoint_dns(self) -> Optional[str]: + """The dedicated endpoint dns for this Endpoint. + + This property is only available if dedicated endpoint is enabled. + If dedicated endpoint is not enabled, this property returns None. 
+ """ + if re.match(r"^projects/.*/endpoints/.*$", self._gca_resource.name): + dedicated_endpoint_dns = getattr( + self._gca_resource, "dedicated_endpoint_dns", None + ) + if self.dedicated_endpoint_enabled and not dedicated_endpoint_dns: + self._sync_gca_resource() + dedicated_endpoint_dns = getattr( + self._gca_resource, "dedicated_endpoint_dns", None + ) + return dedicated_endpoint_dns + return None + + @property + def dedicated_endpoint_enabled(self) -> bool: + """The dedicated endpoint is enabled for this Endpoint. + + This property will be true if dedicated endpoint is enabled. + """ + if re.match(r"^projects/.*/endpoints/.*$", self._gca_resource.name): + self._assert_gca_resource_is_available() + return getattr(self._gca_resource, "dedicated_endpoint_enabled", False) + return False @classmethod def create( @@ -1093,8 +1143,6 @@ def _construct_sdk_resource_from_gapic( credentials=credentials, ) endpoint.authorized_session = None - endpoint.raw_predict_request_url = None - endpoint.stream_raw_predict_request_url = None return endpoint @@ -1186,6 +1234,7 @@ def _validate_deploy_args( traffic_split: Optional[Dict[str, int]], traffic_percentage: Optional[int], deployment_resource_pool: Optional[DeploymentResourcePool], + required_replica_count: Optional[int], ): """Helper method to validate deploy arguments. @@ -1233,6 +1282,14 @@ def _validate_deploy_args( are deployed to the same DeploymentResourcePool will be hosted in a shared model server. If provided, will override replica count arguments. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. 
Raises: ValueError: if Min or Max replica is negative. Traffic percentage > 100 or @@ -1246,6 +1303,8 @@ def _validate_deploy_args( and min_replica_count != 1 or max_replica_count and max_replica_count != 1 + or required_replica_count + and required_replica_count != 0 ): raise ValueError( "Ignoring explicitly specified replica counts, " @@ -1264,6 +1323,8 @@ def _validate_deploy_args( raise ValueError("Min replica cannot be negative.") if max_replica_count < 0: raise ValueError("Max replica cannot be negative.") + if required_replica_count and required_replica_count < 0: + raise ValueError("Required replica cannot be negative.") if accelerator_type: utils.validate_accelerator_type(accelerator_type) @@ -1293,6 +1354,7 @@ def deploy( max_replica_count: int = 1, accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, + gpu_partition_size: Optional[str] = None, tpu_topology: Optional[str] = None, service_account: Optional[str] = None, explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, @@ -1304,6 +1366,12 @@ def deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, enable_access_logging=False, disable_container_logging: bool = False, deployment_resource_pool: Optional[DeploymentResourcePool] = None, @@ -1313,6 +1381,7 @@ def deploy( spot: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, 
) -> None: """Deploys a Model to the Endpoint. @@ -1362,6 +1431,8 @@ def deploy( NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 accelerator_count (int): Optional. The number of accelerators to attach to a worker replica. + gpu_partition_size (str): + Optional. The GPU partition Size for Nvidia MIG. tpu_topology (str): Optional. The TPU topology to use for the DeployedModel. Required for CloudTPU multihost deployments. @@ -1396,6 +1467,22 @@ def deploy( Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): + Optional. The target number of requests per minute for autoscaling. + If set, the model will be scaled based on the number of requests it receives. + autoscaling_target_dcgm_fi_dev_gpu_util (int): + Optional. Target DCGM metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): + Optional. Target vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): + Optional. Target vLLM metrics for number of inference requests + currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): + Optional. The target number of pubsub undelivered messages for autoscaling. + If set, the model will be scaled based on the pubsub queue size. + autoscaling_pubsub_subscription_labels (Dict[str, str]): + Optional. Monitored resource labels as key value pairs for + metric filtering for pubsub_num_undelivered_messages. enable_access_logging (bool): Whether to enable endpoint access logging. Defaults to False. disable_container_logging (bool): @@ -1428,6 +1515,14 @@ def deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + required_replica_count (int): + Optional. 
Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. """ self._sync_gca_resource_if_skipped() @@ -1439,6 +1534,7 @@ def deploy( traffic_split=traffic_split, traffic_percentage=traffic_percentage, deployment_resource_pool=deployment_resource_pool, + required_replica_count=required_replica_count, ) explanation_spec = _explanation_utils.create_and_validate_explanation_spec( @@ -1456,6 +1552,7 @@ def deploy( max_replica_count=max_replica_count, accelerator_type=accelerator_type, accelerator_count=accelerator_count, + gpu_partition_size=gpu_partition_size, tpu_topology=tpu_topology, reservation_affinity_type=reservation_affinity_type, reservation_affinity_key=reservation_affinity_key, @@ -1467,12 +1564,19 @@ def deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, spot=spot, enable_access_logging=enable_access_logging, disable_container_logging=disable_container_logging, deployment_resource_pool=deployment_resource_pool, 
fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, ) @base.optional_sync() @@ -1487,6 +1591,7 @@ def _deploy( max_replica_count: int = 1, accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, + gpu_partition_size: Optional[str] = None, tpu_topology: Optional[str] = None, reservation_affinity_type: Optional[str] = None, reservation_affinity_key: Optional[str] = None, @@ -1498,12 +1603,19 @@ def _deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, spot: bool = False, enable_access_logging=False, disable_container_logging: bool = False, deployment_resource_pool: Optional[DeploymentResourcePool] = None, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, ) -> None: """Deploys a Model to the Endpoint. @@ -1553,6 +1665,8 @@ def _deploy( NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 accelerator_count (int): Optional. The number of accelerators to attach to a worker replica. + gpu_partition_size (str): + Optional. The GPU partition size for NVidia MIG. tpu_topology (str): Optional. The TPU topology to use for the DeployedModel. Required for CloudTPU multihost deployments. @@ -1593,6 +1707,22 @@ def _deploy( Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. 
+ autoscaling_target_request_count_per_minute (int): + Optional. The target number of requests per minute for autoscaling. + If set, the model will be scaled based on the number of requests it receives. + autoscaling_target_dcgm_fi_dev_gpu_util (int): + Optional. Target DCGM metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): + Optional. Target vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): + Optional. Target vLLM metrics for number of inference requests + currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): + Optional. The target number of pubsub undelivered messages for autoscaling. + If set, the model will be scaled based on the pubsub queue size. + autoscaling_pubsub_subscription_labels (Dict[str, str]): + Optional. Monitored resource labels as key value pairs for + metric filtering for pubsub_num_undelivered_messages. spot (bool): Optional. Whether to schedule the deployment workload on spot VMs. enable_access_logging (bool): @@ -1613,6 +1743,14 @@ def _deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. 
""" _LOGGER.log_action_start_against_resource( f"Deploying Model {model.resource_name} to", "", self @@ -1632,6 +1770,7 @@ def _deploy( max_replica_count=max_replica_count, accelerator_type=accelerator_type, accelerator_count=accelerator_count, + gpu_partition_size=gpu_partition_size, tpu_topology=tpu_topology, reservation_affinity_type=reservation_affinity_type, reservation_affinity_key=reservation_affinity_key, @@ -1642,12 +1781,19 @@ def _deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, spot=spot, enable_access_logging=enable_access_logging, disable_container_logging=disable_container_logging, deployment_resource_pool=deployment_resource_pool, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, ) _LOGGER.log_action_completed_against_resource("model", "deployed", self) @@ -1670,6 +1816,7 @@ def _deploy_call( max_replica_count: int = 1, accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, + gpu_partition_size: Optional[str] = None, tpu_topology: Optional[str] = None, reservation_affinity_type: Optional[str] = None, reservation_affinity_key: Optional[str] = None, @@ -1680,12 +1827,19 @@ def _deploy_call( deploy_request_timeout: Optional[float] = None, 
autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, spot: bool = False, enable_access_logging=False, disable_container_logging: bool = False, deployment_resource_pool: Optional[DeploymentResourcePool] = None, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, ) -> None: """Helper method to deploy model to endpoint. @@ -1745,6 +1899,8 @@ def _deploy_call( NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 accelerator_count (int): Optional. The number of accelerators to attach to a worker replica. + gpu_partition_size (str): + Optional. The GPU partition Size for Nvidia MIG. tpu_topology (str): Optional. The TPU topology to use for the DeployedModel. Required for CloudTPU multihost deployments. @@ -1782,6 +1938,20 @@ def _deploy_call( Optional. Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): + Optional. Target request count per minute per instance. + autoscaling_target_dcgm_fi_dev_gpu_util (int): + Optional. Target DCGM metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): + Optional. Target vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): + Optional. Target vLLM metrics for number of inference requests + currently waiting in the queue. 
+ autoscaling_target_pubsub_num_undelivered_messages (int): + Optional. Target pubsub queue size per instance. + autoscaling_pubsub_subscription_labels (Dict[str, str]): + Optional. Monitored resource labels as key value pairs for + metric filtering for pubsub_num_undelivered_messages. spot (bool): Optional. Whether to schedule the deployment workload on spot VMs. enable_access_logging (bool): @@ -1802,6 +1972,14 @@ def _deploy_call( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. Raises: ValueError: If only `accelerator_type` or `accelerator_count` is specified. @@ -1813,15 +1991,35 @@ def _deploy_call( ValueError: If both `explanation_spec` and `deployment_resource_pool` are present. """ + # The two features are incompatible due to API versioning issue. + # TODO(b/436626409) after adding the disable_container_logging to v1 proto + # remove the incomaptiblity check. + if gpu_partition_size and disable_container_logging: + _LOGGER.warning( + "Cannot set both gpu_partition_size and disable_container_logging. disable_container_logging will be ignored." 
+ ) + + gca_endpoint = gca_endpoint_compat + gca_machine_resources = gca_machine_resources_compat + if gpu_partition_size: + gca_machine_resources = gca_machine_resources_v1beta1_compat + gca_endpoint = gca_endpoint_v1beta1_compat + api_client = api_client.select_version("v1beta1") service_account = service_account or initializer.global_config.service_account if deployment_resource_pool: - deployed_model = gca_endpoint_compat.DeployedModel( + deployed_model = gca_endpoint.DeployedModel( model=model.versioned_resource_name, display_name=deployed_model_display_name, service_account=service_account, - disable_container_logging=disable_container_logging, ) + if not gpu_partition_size: + deployed_model = gca_endpoint.DeployedModel( + model=model.versioned_resource_name, + display_name=deployed_model_display_name, + service_account=service_account, + disable_container_logging=disable_container_logging, + ) if system_labels: deployed_model.system_labels = system_labels @@ -1843,15 +2041,28 @@ def _deploy_call( or accelerator_count or autoscaling_target_accelerator_duty_cycle or autoscaling_target_cpu_utilization + or autoscaling_target_request_count_per_minute + or autoscaling_target_dcgm_fi_dev_gpu_util + or autoscaling_target_vllm_gpu_cache_usage_perc + or autoscaling_target_vllm_num_requests_waiting + or autoscaling_target_pubsub_num_undelivered_messages + or autoscaling_pubsub_subscription_labels ) if provided_custom_machine_spec: raise ValueError( "Conflicting parameters in deployment request. " - "The machine_type, accelerator_type and accelerator_count," - "autoscaling_target_accelerator_duty_cycle," - "autoscaling_target_cpu_utilization parameters may not be set " - "when `deployment_resource_pool` is specified." 
+ "The machine_type, accelerator_type and accelerator_count, " + "autoscaling_target_accelerator_duty_cycle, " + "autoscaling_target_cpu_utilization, " + "autoscaling_target_request_count_per_minute, " + "autoscaling_target_dcgm_fi_dev_gpu_util, " + "autoscaling_target_vllm_gpu_cache_usage_perc, " + "autoscaling_target_vllm_num_requests_waiting, " + "autoscaling_target_pubsub_num_undelivered_messages, " + "autoscaling_pubsub_subscription_labels parameters " + "may not be set when `deployment_resource_pool` is " + "specified." ) deployed_model.shared_resources = deployment_resource_pool.resource_name @@ -1876,14 +2087,20 @@ def _deploy_call( "Both `accelerator_type` and `accelerator_count` should be set " "when specifying autoscaling_target_accelerator_duty_cycle`" ) - - deployed_model = gca_endpoint_compat.DeployedModel( + deployed_model = gca_endpoint.DeployedModel( model=model.versioned_resource_name, display_name=deployed_model_display_name, service_account=service_account, enable_access_logging=enable_access_logging, - disable_container_logging=disable_container_logging, ) + if not gpu_partition_size: + deployed_model = gca_endpoint.DeployedModel( + model=model.versioned_resource_name, + display_name=deployed_model_display_name, + service_account=service_account, + enable_access_logging=enable_access_logging, + disable_container_logging=disable_container_logging, + ) if system_labels: deployed_model.system_labels = system_labels @@ -1902,6 +2119,12 @@ def _deploy_call( or accelerator_count or autoscaling_target_accelerator_duty_cycle or autoscaling_target_cpu_utilization + or autoscaling_target_request_count_per_minute + or autoscaling_target_dcgm_fi_dev_gpu_util + or autoscaling_target_vllm_gpu_cache_usage_perc + or autoscaling_target_vllm_num_requests_waiting + or autoscaling_target_pubsub_num_undelivered_messages + or autoscaling_pubsub_subscription_labels ) # If the model supports both automatic and dedicated deployment resources, @@ -1913,9 +2136,16 @@ def 
_deploy_call( if provided_custom_machine_spec and not use_dedicated_resources: _LOGGER.info( "Model does not support dedicated deployment resources. " - "The machine_type, accelerator_type and accelerator_count," - "autoscaling_target_accelerator_duty_cycle," - "autoscaling_target_cpu_utilization parameters are ignored." + "The machine_type, accelerator_type and accelerator_count, " + "autoscaling_target_accelerator_duty_cycle, " + "autoscaling_target_cpu_utilization, " + "autoscaling_target_request_count_per_minute, " + "autoscaling_target_dcgm_fi_dev_gpu_util, " + "autoscaling_target_vllm_gpu_cache_usage_perc, " + "autoscaling_target_vllm_num_requests_waiting, " + "autoscaling_target_pubsub_num_undelivered_messages, " + "autoscaling_pubsub_subscription_labels parameters " + "are ignored." ) if use_dedicated_resources and not machine_type: @@ -1923,18 +2153,19 @@ def _deploy_call( _LOGGER.info(f"Using default machine_type: {machine_type}") if use_dedicated_resources: - dedicated_resources = gca_machine_resources_compat.DedicatedResources( + dedicated_resources = gca_machine_resources.DedicatedResources( min_replica_count=min_replica_count, max_replica_count=max_replica_count, spot=spot, + required_replica_count=required_replica_count, ) - machine_spec = gca_machine_resources_compat.MachineSpec( + machine_spec = gca_machine_resources.MachineSpec( machine_type=machine_type ) if autoscaling_target_cpu_utilization: - autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec( + autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec( metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization", target=autoscaling_target_cpu_utilization, ) @@ -1948,7 +2179,7 @@ def _deploy_call( machine_spec.accelerator_count = accelerator_count if autoscaling_target_accelerator_duty_cycle: - autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec( + autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec( 
metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle", target=autoscaling_target_accelerator_duty_cycle, ) @@ -1956,6 +2187,78 @@ def _deploy_call( [autoscaling_metric_spec] ) + if gpu_partition_size: + machine_spec.gpu_partition_size = gpu_partition_size + + if autoscaling_target_request_count_per_minute: + autoscaling_metric_spec = ( + gca_machine_resources.AutoscalingMetricSpec( + metric_name=( + "aiplatform.googleapis.com/prediction/online/" + "request_count" + ), + target=autoscaling_target_request_count_per_minute, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_dcgm_fi_dev_gpu_util: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "prometheus.googleapis.com/" + "vertex_dcgm_fi_dev_gpu_util" + ), + target=autoscaling_target_dcgm_fi_dev_gpu_util, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_vllm_gpu_cache_usage_perc: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "prometheus.googleapis.com/" + "vertex_vllm_gpu_cache_usage_perc" + ), + target=autoscaling_target_vllm_gpu_cache_usage_perc, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_vllm_num_requests_waiting: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "prometheus.googleapis.com/" + "vertex_vllm_num_requests_waiting" + ), + target=autoscaling_target_vllm_num_requests_waiting, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_pubsub_num_undelivered_messages: + autoscaling_metric_spec = gca_machine_resources.AutoscalingMetricSpec( + metric_name=( + "pubsub.googleapis.com/subscription/" + "num_undelivered_messages" + ), + 
target=autoscaling_target_pubsub_num_undelivered_messages, + monitored_resource_labels=autoscaling_pubsub_subscription_labels, + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + if reservation_affinity_type: machine_spec.reservation_affinity = utils.get_reservation_affinity( reservation_affinity_type, @@ -1970,14 +2273,14 @@ def _deploy_call( deployed_model.dedicated_resources = dedicated_resources if fast_tryout_enabled: deployed_model.faster_deployment_config = ( - gca_endpoint_compat.FasterDeploymentConfig( + gca_endpoint.FasterDeploymentConfig( fast_tryout_enabled=fast_tryout_enabled ) ) elif supports_automatic_resources: deployed_model.automatic_resources = ( - gca_machine_resources_compat.AutomaticResources( + gca_machine_resources.AutomaticResources( min_replica_count=min_replica_count, max_replica_count=max_replica_count, ) @@ -2251,10 +2554,9 @@ def predict( ) -> Prediction: """Make a prediction against this Endpoint. - For dedicated endpoint, set use_dedicated_endpoint = True: + Example usage: ``` - response = my_endpoint.predict(instances=[...], - use_dedicated_endpoint=True) + response = my_endpoint.predict(instances=[...]) my_predictions = response.predictions ``` @@ -2289,11 +2591,22 @@ def predict( Returns: prediction (aiplatform.Prediction): Prediction with returned predictions and Model ID. + + Raises: + ImportError: If there is an issue importing the `TCPKeepAliveAdapter` package. + ValueError: If the dedicated endpoint DNS is empty for dedicated endpoints. + ValueError: If the prediction request fails for dedicated endpoints. 
""" self.wait() + + if parameters is not None: + data = json.dumps({"instances": instances, "parameters": parameters}) + else: + data = json.dumps({"instances": instances}) + if use_raw_predict: raw_predict_response = self.raw_predict( - body=json.dumps({"instances": instances, "parameters": parameters}), + body=data, headers={"Content-Type": "application/json"}, use_dedicated_endpoint=use_dedicated_endpoint, timeout=timeout, @@ -2313,52 +2626,7 @@ def predict( ), ) - if use_dedicated_endpoint: - self._sync_gca_resource_if_skipped() - if ( - not self._gca_resource.dedicated_endpoint_enabled - or self._gca_resource.dedicated_endpoint_dns is None - ): - raise ValueError( - "Dedicated endpoint is not enabled or DNS is empty." - "Please make sure endpoint has dedicated endpoint enabled" - "and model are ready before making a prediction." - ) - - if not self.authorized_session: - self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES - self.authorized_session = google_auth_requests.AuthorizedSession( - self.credentials - ) - - headers = { - "Content-Type": "application/json", - } - - url = f"https://{self._gca_resource.dedicated_endpoint_dns}/v1/{self.resource_name}:predict" - response = self.authorized_session.post( - url=url, - data=json.dumps( - { - "instances": instances, - "parameters": parameters, - } - ), - headers=headers, - timeout=timeout, - ) - - prediction_response = json.loads(response.text) - - return Prediction( - predictions=prediction_response.get("predictions"), - metadata=prediction_response.get("metadata"), - deployed_model_id=prediction_response.get("deployedModelId"), - model_resource_name=prediction_response.get("model"), - model_version_id=prediction_response.get("modelVersionId"), - ) - - else: + if not self.dedicated_endpoint_enabled: prediction_response = self._prediction_client.predict( endpoint=self._gca_resource.name, instances=instances, @@ -2381,6 +2649,58 @@ def predict( model_resource_name=prediction_response.model, ) + if not 
self.dedicated_endpoint_dns: + raise ValueError( + "Dedicated endpoint DNS is empty. Please make sure endpoint" + "and model are ready before making a prediction." + ) + + if not self.authorized_session: + self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES + self.authorized_session = google_auth_requests.AuthorizedSession( + self.credentials + ) + + if timeout is not None and timeout > google_auth_requests._DEFAULT_TIMEOUT: + try: + from requests_toolbelt.adapters.socket_options import ( + TCPKeepAliveAdapter, + ) + except ImportError: + raise ImportError( + "Cannot import the requests-toolbelt library." + "Please install requests-toolbelt." + ) + # count * interval need to be larger than 1 hr (3600s) + keep_alive = TCPKeepAliveAdapter(idle=120, count=100, interval=100) + self.authorized_session.mount("https://", keep_alive) + + url = f"https://{self.dedicated_endpoint_dns}/v1/{self.resource_name}:predict" + headers = { + "Content-Type": "application/json", + } + response = self.authorized_session.post( + url=url, + data=data, + headers=headers, + timeout=timeout, + ) + + if response.status_code != 200: + raise ValueError( + f"Failed to make prediction request. Status code:" + f"{response.status_code}, response: {response.text}." 
+ ) + prediction_response = json.loads(response.text) + + return Prediction( + predictions=prediction_response.get("predictions"), + metadata=prediction_response.get("metadata"), + deployed_model_id=prediction_response.get("deployedModelId"), + model_resource_name=prediction_response.get("model"), + model_version_id=prediction_response.get("modelVersionId"), + ) + async def predict_async( self, instances: List, @@ -2461,12 +2781,6 @@ def raw_predict( body = b'{"instances":[{"feat_1":val_1, "feat_2":val_2}]}' headers = {'Content-Type':'application/json'} ) - # For dedicated endpoint: - response = my_endpoint.raw_predict( - body = b'{"instances":[{"feat_1":val_1, "feat_2":val_2}]}', - headers = {'Content-Type':'application/json'}, - dedicated_endpoint=True, - ) status_code = response.status_code results = json.dumps(response.text) @@ -2482,6 +2796,9 @@ def raw_predict( Returns: A requests.models.Response object containing the status code and prediction results. + + Raises: + ImportError: If there is an issue importing the `TCPKeepAliveAdapter` package. """ if not self.authorized_session: self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES @@ -2489,23 +2806,30 @@ def raw_predict( self.credentials ) - if self.raw_predict_request_url is None: - self.raw_predict_request_url = f"https://{self.location}-{constants.base.API_BASE_PATH}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:rawPredict" - - url = self.raw_predict_request_url - - if use_dedicated_endpoint: - self._sync_gca_resource_if_skipped() - if ( - not self._gca_resource.dedicated_endpoint_enabled - or self._gca_resource.dedicated_endpoint_dns is None - ): + if self.dedicated_endpoint_enabled: + if not self.dedicated_endpoint_dns: raise ValueError( - "Dedicated endpoint is not enabled or DNS is empty." - "Please make sure endpoint has dedicated endpoint enabled" + "Dedicated endpoint DNS is empty. Please make sure endpoint" "and model are ready before making a prediction." 
) - url = f"https://{self._gca_resource.dedicated_endpoint_dns}/v1/{self.resource_name}:rawPredict" + url = f"https://{self.dedicated_endpoint_dns}/v1/{self.resource_name}:rawPredict" + + if timeout is not None and timeout > google_auth_requests._DEFAULT_TIMEOUT: + try: + from requests_toolbelt.adapters.socket_options import ( + TCPKeepAliveAdapter, + ) + except ImportError: + raise ImportError( + "Cannot import the requests-toolbelt library." + "Please install requests-toolbelt." + ) + # count * interval need to be larger than 1 hr (3600s) + keep_alive = TCPKeepAliveAdapter(idle=120, count=100, interval=100) + self.authorized_session.mount("https://", keep_alive) + else: + url = f"https://{self.location}-{constants.base.API_BASE_PATH}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:rawPredict" + return self.authorized_session.post( url=url, data=body, headers=headers, timeout=timeout ) @@ -2519,6 +2843,7 @@ def stream_raw_predict( timeout: Optional[float] = None, ) -> Iterator[requests.models.Response]: """Makes a streaming prediction request using arbitrary headers. + For custom model, this method is only supported for dedicated endpoint. Example usage: ``` @@ -2531,18 +2856,6 @@ def stream_raw_predict( stream_result = json.dumps(response.text) ``` - For dedicated endpoint: - ``` - my_endpoint = aiplatform.Endpoint(ENDPOINT_ID) - for stream_response in my_endpoint.stream_raw_predict( - body = b'{"instances":[{"feat_1":val_1, "feat_2":val_2}]}', - headers = {'Content-Type':'application/json'}, - use_dedicated_endpoint=True, - ): - status_code = response.status_code - stream_result = json.dumps(response.text) - ``` - Args: body (bytes): The body of the prediction request in bytes. 
This must not @@ -2565,23 +2878,15 @@ def stream_raw_predict( self.credentials ) - if self.stream_raw_predict_request_url is None: - self.stream_raw_predict_request_url = f"https://{self.location}-{constants.base.API_BASE_PATH}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:streamRawPredict" - - url = self.raw_predict_request_url - - if use_dedicated_endpoint: - self._sync_gca_resource_if_skipped() - if ( - not self._gca_resource.dedicated_endpoint_enabled - or self._gca_resource.dedicated_endpoint_dns is None - ): + if self.dedicated_endpoint_enabled: + if not self.dedicated_endpoint_dns: raise ValueError( - "Dedicated endpoint is not enabled or DNS is empty." - "Please make sure endpoint has dedicated endpoint enabled" + "Dedicated endpoint DNS is empty. Please make sure endpoint" "and model are ready before making a prediction." ) - url = f"https://{self._gca_resource.dedicated_endpoint_dns}/v1/{self.resource_name}:streamRawPredict" + url = f"https://{self.dedicated_endpoint_dns}/v1/{self.resource_name}:streamRawPredict" + else: + url = f"https://{self.location}-{constants.base.API_BASE_PATH}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:streamRawPredict" with self.authorized_session.post( url=url, @@ -3036,6 +3341,151 @@ async def explain_async( explanations=explain_response.explanations, ) + def invoke( + self, + request_path: str, + body: bytes, + headers: Dict[str, str], + deployed_model_id: Optional[str] = None, + stream: bool = False, + timeout: Optional[float] = None, + ) -> Union[requests.models.Response, Iterator[requests.models.Response]]: + """Makes a prediction request for arbitrary paths. 
+ + Example usage: + my_endpoint = aiplatform.Endpoint(ENDPOINT_ID) + # Unary request + body = { + "model": "", + "messages": [ + { + "role": "user", + "content": "Hello!", + } + ], + } + + response = my_endpoint.invoke( + request_path="/v1/chat/completions", + body = json.dumps(body).encode("utf-8"), + headers = {'Content-Type':'application/json'}, + ) + status_code = response.status_code + results = json.dumps(response.text) + + # Streaming request + body = { + "model": "", + "messages": [ + { + "role": "user", + "content": "Hello!", + } + ], + "stream": "true", + } + + for chunk in my_endpoint.invoke( + request_path="/v1/chat/completions", + body = json.dumps(body).encode("utf-8"), + headers = {'Content-Type':'application/json'}, + stream=True, + ): + chunk_text = chunk.decode('utf-8') + + Args: + request_path (str): + The request url to the model server. The request path must be + a string that starts with a forward slash. Root can't be + accessed. + + body (bytes): + The body of the prediction request in bytes. This must not exceed 1.5 mb per request. + + headers (Dict[str, str]): + The header of the request as a dictionary. There are no restrictions on the header. + + deployed_model_id (str): + Optional. If specified, this InvokeRequest will be served by the + chosen DeployedModel, overriding this Endpoint's traffic split. + + stream (bool): If set to True, streaming will be enabled. + + timeout (float): Optional. The timeout for this request in seconds. + + Returns: + By default, a requests.models.Response object containing the status code and prediction results is returned. + For stream=True, the response will be of type Iterator[requests.models.Response]. + + Raises: + ImportError: If there is an issue importing the `TCPKeepAliveAdapter` package. 
+ """ + if not self.authorized_session: + self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES + self.authorized_session = google_auth_requests.AuthorizedSession( + self.credentials + ) + if not self.dedicated_endpoint_enabled: + raise ValueError( + "Invoke method is only supported on dedicated endpoints. Please" + "make sure endpoint and model are correctly configured." + ) + if self.dedicated_endpoint_dns is None: + raise ValueError( + "Dedicated endpoint DNS is empty. Please make sure endpoint" + "and model are ready before making a prediction." + ) + if len(request_path) < 0 or request_path[0] != "/": + raise ValueError( + "container path must be a string that starts with a forward slash." + ) + url = f"https://{self.dedicated_endpoint_dns}/v1/{self.resource_name}" + + if deployed_model_id: + deployed_model_ids = set() + if hasattr(self._gca_resource, "deployed_models"): + for deployed_model in self._gca_resource.deployed_models: + deployed_model_ids.add(deployed_model.id) + if deployed_model_id not in deployed_model_ids: + raise ValueError( + f"Deployed model {deployed_model_id} not found in endpoint" + f" {self.name}." + ) + url += f"/deployedModels/{deployed_model_id}" + url += "/invoke" + request_path + if timeout is not None and timeout > google_auth_requests._DEFAULT_TIMEOUT: + try: + from requests_toolbelt.adapters.socket_options import ( + TCPKeepAliveAdapter, + ) + except ImportError: + raise ImportError( + "Cannot import the requests-toolbelt library." + "Please install requests-toolbelt." 
+ ) + # count * interval need to be larger than 1 hr (3600s) + keep_alive = TCPKeepAliveAdapter(idle=120, count=100, interval=100) + self.authorized_session.mount("https://", keep_alive) + + def invoke_stream_response(): + with self.authorized_session.post( + url=url, + data=body, + headers=headers, + timeout=timeout, + stream=True, + ) as resp: + for line in resp.iter_lines(): + yield line + + if stream: + # This wrapping allows a Response object is returned for + # non-streaming requests. + return invoke_stream_response() + return self.authorized_session.post( + url=url, data=body, headers=headers, timeout=timeout + ) + @classmethod def list( cls, @@ -3215,6 +3665,7 @@ def __init__( ) self._http_client = urllib3.PoolManager(cert_reqs="CERT_NONE") + self._authorized_session = None @property def predict_http_uri(self) -> Optional[str]: @@ -3237,10 +3688,14 @@ def health_http_uri(self) -> Optional[str]: return None return self._gca_resource.deployed_models[0].private_endpoints.health_http_uri + # PrivateServiceConnectConfig is deprecated. + # Use service_networking.PrivateServiceConnectConfig instead. 
class PrivateServiceConnectConfig: """Represents a Vertex AI PrivateServiceConnectConfig resource.""" - _gapic_private_service_connect_config: gca_service_networking.PrivateServiceConnectConfig + _gapic_private_service_connect_config: ( + gca_service_networking.PrivateServiceConnectConfig + ) def __init__( self, @@ -3273,7 +3728,13 @@ def create( credentials: Optional[auth_credentials.Credentials] = None, encryption_spec_key_name: Optional[str] = None, sync=True, - private_service_connect_config: Optional[PrivateServiceConnectConfig] = None, + private_service_connect_config: Union[ + Optional[PrivateServiceConnectConfig], + Optional[gca_service_networking.PrivateServiceConnectConfig], + ] = None, + enable_request_response_logging=False, + request_response_logging_sampling_rate: Optional[float] = None, + request_response_logging_bq_destination_table: Optional[str] = None, inference_timeout: Optional[int] = None, ) -> "PrivateEndpoint": """Creates a new PrivateEndpoint. @@ -3299,7 +3760,8 @@ def create( display_name="my_endpoint_name", project="my_project_id", location="us-central1", - private_service_connect=aiplatform.PrivateEndpoint.PrivateServiceConnectConfig( + private_service_connect=aiplatform.compat.types.service_networking.PrivateServiceConnectConfig( + enable_private_service_connect=True, project_allowlist=["test-project"]), ) @@ -3307,68 +3769,72 @@ def create( my_private_endpoint = aiplatform.PrivateEndpoint.create( display_name="my_endpoint_name", - private_service_connect=aiplatform.PrivateEndpoint.PrivateServiceConnectConfig( + private_service_connect=aiplatform.compat.types.service_networking.PrivateServiceConnectConfig( + enable_private_service_connect=True, project_allowlist=["test-project"]), ) Args: - display_name (str): - Required. The user-defined name of the Endpoint. - The name can be up to 128 characters long and can be consist - of any UTF-8 characters. - project (str): - Optional. Project to retrieve endpoint from. 
If not set, project - set in aiplatform.init will be used. - location (str): - Optional. Location to retrieve endpoint from. If not set, location - set in aiplatform.init will be used. - network (str): - Optional. The full name of the Compute Engine network to which - this Endpoint will be peered. E.g. "projects/123456789123/global/networks/my_vpc". - Private services access must already be configured for the network. - If left unspecified, the network set with aiplatform.init will be used. - Cannot be set together with private_service_connect_config. - description (str): - Optional. The description of the Endpoint. - labels (Dict[str, str]): - Optional. The labels with user-defined metadata to - organize your Endpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - credentials (auth_credentials.Credentials): - Optional. Custom credentials to use to upload this model. Overrides - credentials set in aiplatform.init. - encryption_spec_key_name (str): - Optional. The Cloud KMS resource identifier of the customer - managed encryption key used to protect the model. Has the + display_name (str): Required. The user-defined name of the Endpoint. The + name can be up to 128 characters long and can be consist of any UTF-8 + characters. + project (str): Optional. Project to retrieve endpoint from. If not set, + project set in aiplatform.init will be used. + location (str): Optional. Location to retrieve endpoint from. If not + set, location set in aiplatform.init will be used. + network (str): Optional. The full name of the Compute Engine network to + which this Endpoint will be peered. E.g. + "projects/123456789123/global/networks/my_vpc". Private services + access must already be configured for the network. 
If left + unspecified, the network set with aiplatform.init will be used. Cannot + be set together with private_service_connect_config. + description (str): Optional. The description of the Endpoint. + labels (Dict[str, str]): Optional. The labels with user-defined metadata + to organize your Endpoints. Label keys and values can be no longer + than 64 characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. International + characters are allowed. See https://goo.gl/xmQnxf for more information + and examples of labels. + credentials (auth_credentials.Credentials): Optional. Custom credentials + to use to upload this model. Overrides credentials set in + aiplatform.init. + encryption_spec_key_name (str): Optional. The Cloud KMS resource + identifier of the customer managed encryption key used to protect the + model. Has the form: - ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. - The key needs to be in the same region as where the compute - resource is created. - - If set, this Model and all sub-resources of this Model will be secured by this key. - - Overrides encryption_spec_key_name set in aiplatform.init. - sync (bool): - Whether to execute this method synchronously. If False, this method - will be executed in concurrent Future and any downstream object will - be immediately returned and synced when the Future has completed. - private_service_connect_config (aiplatform.PrivateEndpoint.PrivateServiceConnectConfig): - [Private Service Connect](https://cloud.google.com/vpc/docs/private-service-connect) configuration for the endpoint. - Cannot be set when network is specified. - inference_timeout (int): - Optional. It defines the prediction timeout, in seconds, for online predictions using cloud-based endpoints. This applies to either PSC endpoints, when private_service_connect_config is set, or dedicated endpoints, when dedicated_endpoint_enabled is true. 
+ ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. If set, this Model and all sub-resources of + this Model will be secured by this key. Overrides + encryption_spec_key_name set in aiplatform.init. + sync (bool): Whether to execute this method synchronously. If False, + this method will be executed in concurrent Future and any downstream + object will be immediately returned and synced when the Future has + completed. + private_service_connect_config + (aiplatform.compat.types.service_networking.PrivateServiceConnectConfig): [Private + Service Connect Configuration](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/PrivateServiceConnectConfig) + for the endpoint. Cannot be set when network is specified. + enable_request_response_logging (bool): Optional. Whether to enable + request & response logging for this endpoint. + request_response_logging_sampling_rate (float): Optional. The request + response logging sampling rate. If not set, default is 0.0. + request_response_logging_bq_destination_table (str): Optional. The + request response logging bigquery destination. If not set, will create + a table with name: + ``bq://{project_id}.logging_{endpoint_display_name}_{endpoint_id}.request_response_logging``. + inference_timeout (int): Optional. It defines the prediction timeout, in + seconds, for online predictions using cloud-based endpoints. This + applies to either PSC endpoints, when private_service_connect_config + is set, or dedicated endpoints, when dedicated_endpoint_enabled is + true. Returns: endpoint (aiplatform.PrivateEndpoint): Created endpoint. Raises: - ValueError: A network must be instantiated when creating a PrivateEndpoint. + ValueError: A network must be instantiated when creating a + PrivateEndpoint. 
""" api_client = cls._instantiate_client(location=location, credentials=credentials) @@ -3394,8 +3860,26 @@ def create( config = None if private_service_connect_config: - config = ( - private_service_connect_config._gapic_private_service_connect_config + if hasattr( + private_service_connect_config, + "_gapic_private_service_connect_config", + ): + config = ( + private_service_connect_config._gapic_private_service_connect_config + ) + else: + config = private_service_connect_config + + predict_request_response_logging_config = None + if enable_request_response_logging: + predict_request_response_logging_config = ( + gca_endpoint_compat.PredictRequestResponseLoggingConfig( + enabled=True, + sampling_rate=request_response_logging_sampling_rate, + bigquery_destination=gca_io_compat.BigQueryDestination( + output_uri=request_response_logging_bq_destination_table + ), + ) ) client_connection_config = None @@ -3418,6 +3902,7 @@ def create( network=network, sync=sync, private_service_connect_config=config, + predict_request_response_logging_config=predict_request_response_logging_config, client_connection_config=client_connection_config, ) @@ -3467,6 +3952,7 @@ def _construct_sdk_resource_from_gapic( ) endpoint._http_client = urllib3.PoolManager(cert_reqs="CERT_NONE") + endpoint._authorized_session = None return endpoint @@ -3549,7 +4035,7 @@ def predict( Example usage: PSA based private endpoint: - response = my_private_endpoint.predict(instances=[...]) + response = my_private_endpoint.predict(instances=[...], parameters={...}) my_predictions = response.predictions PSC based private endpoint: @@ -3615,7 +4101,7 @@ def predict( response = self._http_request( method="POST", url=self.predict_http_uri, - body=json.dumps({"instances": instances}), + body=json.dumps({"instances": instances, "parameters": parameters}), headers={"Content-Type": "application/json"}, ) prediction_response = json.loads(response.data) @@ -3639,24 +4125,31 @@ def predict( "address or DNS." 
) - if not self.credentials.valid: - self.credentials.refresh(google_auth_requests.Request()) + if not self._authorized_session: + self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES + self._authorized_session = google_auth_requests.AuthorizedSession( + self.credentials, + ) + self._authorized_session.verify = False - token = self.credentials.token - headers = { - "Authorization": f"Bearer {token}", - "Content-Type": "application/json", - } + if parameters: + data = json.dumps({"instances": instances, "parameters": parameters}) + else: + data = json.dumps({"instances": instances}) url = f"https://{endpoint_override}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:predict" - response = self._http_request( - method="POST", + response = self._authorized_session.post( url=url, - body=json.dumps({"instances": instances}), - headers=headers, + data=data, + headers={"Content-Type": "application/json"}, ) - prediction_response = json.loads(response.data) + if response.status_code != 200: + raise ValueError( + f"Failed to make prediction request. Status code:" + f"{response.status_code}, response: {response.text}." + ) + prediction_response = json.loads(response.text) return Prediction( predictions=prediction_response.get("predictions"), @@ -3736,19 +4229,19 @@ def raw_predict( "Invalid endpoint override provided. Please only use IP" "address or DNS." 
) - if not self.credentials.valid: - self.credentials.refresh(google_auth_requests.Request()) - token = self.credentials.token - headers_with_token = dict(headers) - headers_with_token["Authorization"] = f"Bearer {token}" + if not self._authorized_session: + self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES + self._authorized_session = google_auth_requests.AuthorizedSession( + self.credentials, + ) + self._authorized_session.verify = False url = f"https://{endpoint_override}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:rawPredict" - return self._http_request( - method="POST", + return self._authorized_session.post( url=url, - body=body, - headers=headers_with_token, + data=body, + headers=headers, ) def stream_raw_predict( @@ -3816,24 +4309,19 @@ def stream_raw_predict( "Invalid endpoint override provided. Please only use IP" "address or DNS." ) - if not self.credentials.valid: - self.credentials.refresh(google_auth_requests.Request()) - token = self.credentials.token - headers_with_token = dict(headers) - headers_with_token["Authorization"] = f"Bearer {token}" - - if not self.authorized_session: + if not self._authorized_session: self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES - self.authorized_session = google_auth_requests.AuthorizedSession( - self.credentials + self._authorized_session = google_auth_requests.AuthorizedSession( + self.credentials, ) + self._authorized_session.verify = False url = f"https://{endpoint_override}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:streamRawPredict" - with self.authorized_session.post( + with self._authorized_session.post( url=url, data=body, - headers=headers_with_token, + headers=headers, stream=True, verify=False, ) as resp: @@ -3845,6 +4333,132 @@ def explain(self): f"{self.__class__.__name__} class does not support 'explain' as of now." 
) + def invoke( + self, + request_path: str, + body: bytes, + headers: Dict[str, str], + deployed_model_id: Optional[str] = None, + stream: bool = False, + timeout: Optional[float] = None, + endpoint_override: Optional[str] = None, + ) -> Iterator[bytes]: + """Makes a prediction request for arbitrary paths. + + Example usage: + my_endpoint = aiplatform.PrivateEndpoint(ENDPOINT_ID) + response = my_endpoint.invoke( + request_path="/v1/chat/completions", + body = json.dumps(DATA).encode("utf-8"), + headers = {'Content-Type':'application/json'}, + endpoint_override="10.128.0.3", + ) + status_code = response.status_code + results = json.dumps(response.text) + + for stream_response in my_endpoint.invoke( + request_path="/v1/chat/completions", + body = json.dumps(DATA).encode("utf-8"), + headers = {'Content-Type':'application/json'}, + stream=True, + endpoint_override="10.128.0.3", + ): + stream_response_text = stream_response.decode('utf-8') + + Args: + request_path (str): + The request url to the model server. The request path must be + a string that starts with a forward slash. Root can't be + accessed. + + body (bytes): + The body of the prediction request in bytes. This must not exceed 1.5 mb per request. + + headers (Dict[str, str]): + The header of the request as a dictionary. There are no restrictions on the header. + + deployed_model_id (str): + Optional. If specified, this InvokeRequest will be served by the + chosen DeployedModel, overriding this Endpoint's traffic split. + + stream (bool): If set to True, streaming will be enabled. + + timeout (float): Optional. The timeout for this request in seconds. + + endpoint_override (Optional[str]): + The Private Service Connect endpoint's IP address or DNS that + points to the endpoint's service attachment. + + Returns: + By default, a requests.models.Response object containing the status code and prediction results is returned. + For stream=True, the response will be of type Iterator[requests.models.Response]. 
+ + Raises: + ValueError: If a endpoint override is not provided for PSC based + endpoint. + ValueError: If a endpoint override is invalid for PSC based endpoint. + """ + self.wait() + if self.network or not self.private_service_connect_config: + raise ValueError("PSA based private endpoint does not support invoke.") + + if self.private_service_connect_config: + if not endpoint_override: + raise ValueError( + "Cannot make an invoke request because endpoint override is" + "not provided. Please ensure an endpoint override is" + "provided." + ) + if not self._validate_endpoint_override(endpoint_override): + raise ValueError( + "Invalid endpoint override provided. Please only use IP" + "address or DNS." + ) + + if not self._authorized_session: + self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES + self._authorized_session = google_auth_requests.AuthorizedSession( + self.credentials, + ) + self._authorized_session.verify = False + if len(request_path) < 0 or request_path[0] != "/": + raise ValueError( + "container path must be a string that starts with a forward slash." + ) + + url = f"https://{endpoint_override}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}" + if deployed_model_id: + deployed_model_ids = set() + if hasattr(self._gca_resource, "deployed_models"): + for deployed_model in self._gca_resource.deployed_models: + deployed_model_ids.add(deployed_model.id) + if deployed_model_id not in deployed_model_ids: + raise ValueError( + f"Deployed model {deployed_model_id} not found in endpoint" + f" {self.name}." + ) + url += f"/deployedModels/{deployed_model_id}" + url += "/invoke" + request_path + + def invoke_stream_response(): + with self._authorized_session.post( + url=url, + data=body, + headers=headers, + timeout=timeout, + stream=True, + ) as resp: + for line in resp.iter_lines(): + yield line + + if stream: + # This wrapping allows a Response object is returned for + # non-streaming requests. 
+ return invoke_stream_response() + return self._authorized_session.post( + url=url, data=body, headers=headers, timeout=timeout + ) + def health_check(self) -> bool: """ Makes a request to this PrivateEndpoint's health check URI. Must be within network @@ -3947,6 +4561,7 @@ def deploy( max_replica_count: int = 1, accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, + gpu_partition_size: Optional[str] = None, tpu_topology: Optional[str] = None, service_account: Optional[str] = None, explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, @@ -3963,6 +4578,15 @@ def deploy( reservation_affinity_values: Optional[List[str]] = None, spot: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, + autoscaling_target_cpu_utilization: Optional[int] = None, + autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, ) -> None: """Deploys a Model to the PrivateEndpoint. @@ -4021,6 +4645,8 @@ def deploy( NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 accelerator_count (int): Optional. The number of accelerators to attach to a worker replica. + gpu_partition_size (str): + Optional. The GPU partition Size for Nvidia MIG. tpu_topology (str): Optional. The TPU topology to use for the DeployedModel. Required for CloudTPU multihost deployments. @@ -4081,6 +4707,14 @@ def deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. 
+ required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. """ if self.network: @@ -4098,6 +4732,7 @@ def deploy( traffic_split=traffic_split, traffic_percentage=traffic_percentage, deployment_resource_pool=None, + required_replica_count=required_replica_count, ) explanation_spec = _explanation_utils.create_and_validate_explanation_spec( @@ -4115,6 +4750,7 @@ def deploy( max_replica_count=max_replica_count, accelerator_type=accelerator_type, accelerator_count=accelerator_count, + gpu_partition_size=gpu_partition_size, tpu_topology=tpu_topology, reservation_affinity_type=reservation_affinity_type, reservation_affinity_key=reservation_affinity_key, @@ -4126,6 +4762,15 @@ def deploy( spot=spot, disable_container_logging=disable_container_logging, system_labels=system_labels, + required_replica_count=required_replica_count, + autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, + autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, ) def update( @@ -4690,6 +5335,7 @@ def upload( version_description: 
Optional[str] = None, serving_container_predict_route: Optional[str] = None, serving_container_health_route: Optional[str] = None, + serving_container_invoke_route_prefix: Optional[str] = None, description: Optional[str] = None, serving_container_command: Optional[Sequence[str]] = None, serving_container_args: Optional[Sequence[str]] = None, @@ -4720,6 +5366,7 @@ def upload( serving_container_health_probe_period_seconds: Optional[int] = None, serving_container_health_probe_timeout_seconds: Optional[int] = None, model_garden_source_model_name: Optional[str] = None, + model_garden_source_model_version_id: Optional[str] = None, ) -> "Model": """Uploads a model and returns a Model representing the uploaded Model resource. @@ -4775,6 +5422,12 @@ def upload( Optional. An HTTP path to send health check requests to the container, and which must be supported by it. If not specified a standard HTTP path will be used by Vertex AI. + serving_container_invoke_route_prefix (str): + Optional. Invoke route prefix for the custom container. "/*" is the only + supported value right now. By setting this field, any non-root route on + this model will be accessible with invoke http call + eg: "/invoke/foo/bar", however the [PredictionService.Invoke] RPC is not + supported yet. description (str): The description of the model. serving_container_command: Optional[Sequence[str]]=None, @@ -4935,7 +5588,9 @@ def upload( model_garden_source_model_name: Optional. The model garden source model resource name if the model is from Vertex Model Garden. - + model_garden_source_model_version_id: + Optional. The model garden source model version id if the + model is from Vertex Model Garden. 
Returns: model (aiplatform.Model): @@ -5037,6 +5692,7 @@ def upload( grpc_ports=grpc_ports, predict_route=serving_container_predict_route, health_route=serving_container_health_route, + invoke_route_prefix=serving_container_invoke_route_prefix, deployment_timeout=deployment_timeout, shared_memory_size_mb=serving_container_shared_memory_size_mb, startup_probe=startup_probe, @@ -5066,11 +5722,19 @@ def upload( base_model_source = None if model_garden_source_model_name: - base_model_source = gca_model_compat.Model.BaseModelSource( - model_garden_source=gca_model_compat.ModelGardenSource( - public_model_name=model_garden_source_model_name + if model_garden_source_model_version_id: + base_model_source = gca_model_compat.Model.BaseModelSource( + model_garden_source=gca_model_compat.ModelGardenSource( + public_model_name=model_garden_source_model_name, + version_id=model_garden_source_model_version_id, + ) + ) + else: + base_model_source = gca_model_compat.Model.BaseModelSource( + model_garden_source=gca_model_compat.ModelGardenSource( + public_model_name=model_garden_source_model_name, + ) ) - ) managed_model = gca_model_compat.Model( display_name=display_name, @@ -5165,6 +5829,7 @@ def deploy( max_replica_count: int = 1, accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, + gpu_partition_size: Optional[str] = None, tpu_topology: Optional[str] = None, service_account: Optional[str] = None, explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None, @@ -5178,6 +5843,12 @@ def deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + 
autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, enable_access_logging=False, disable_container_logging: bool = False, private_service_connect_config: Optional[ @@ -5190,6 +5861,7 @@ def deploy( spot: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, ) -> Union[Endpoint, PrivateEndpoint]: """Deploys model to endpoint. Endpoint will be created if unspecified. @@ -5239,6 +5911,8 @@ def deploy( NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 accelerator_count (int): Optional. The number of accelerators to attach to a worker replica. + gpu_partition_size (str): + Optional. The GPU partition Size for Nvidia MIG. tpu_topology (str): Optional. The TPU topology to use for the DeployedModel. Requireid for CloudTPU multihost deployments. @@ -5292,6 +5966,22 @@ def deploy( Optional. Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): + Optional. The target number of requests per minute for autoscaling. + If set, the model will be scaled based on the number of requests it receives. + autoscaling_target_dcgm_fi_dev_gpu_util (int): + Optional. Target DCGM metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): + Optional. Target vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): + Optional. Target vLLM metrics for number of inference requests + currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): + Optional. The target number of pubsub undelivered messages for autoscaling. + If set, the model will be scaled based on the pubsub queue size. + autoscaling_pubsub_subscription_labels (Dict[str, str]): + Optional. 
Monitored resource labels as key value pairs for + metric filtering for pubsub_num_undelivered_messages. enable_access_logging (bool): Whether to enable endpoint access logging. Defaults to False. disable_container_logging (bool): @@ -5327,6 +6017,37 @@ def deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + autoscaling_target_cpu_utilization (int): + Target CPU Utilization to use for Autoscaling Replicas. + A default value of 60 will be used if not specified. + autoscaling_target_accelerator_duty_cycle (int): + Target Accelerator Duty Cycle. + Must also set accelerator_type and accelerator_count if specified. + A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): + Optional. The target number of requests per minute for autoscaling. + If set, the model will be scaled based on the number of requests it receives. + autoscaling_target_dcgm_fi_dev_gpu_util (int): + Optional. Target DCGM metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): + Optional. Target vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): + Optional. Target vLLM metrics for number of inference requests + currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): + Optional. The target number of pubsub undelivered messages for autoscaling. 
+ If set, the model will be scaled based on the pubsub queue size. + autoscaling_pubsub_subscription_labels (Dict[str, str]): + Optional. Monitored resource labels as key value pairs for + metric filtering for pubsub_num_undelivered_messages. Returns: endpoint (Union[Endpoint, PrivateEndpoint]): @@ -5345,6 +6066,7 @@ def deploy( traffic_split=traffic_split, traffic_percentage=traffic_percentage, deployment_resource_pool=deployment_resource_pool, + required_replica_count=required_replica_count, ) if isinstance(endpoint, PrivateEndpoint): @@ -5376,6 +6098,7 @@ def deploy( max_replica_count=max_replica_count, accelerator_type=accelerator_type, accelerator_count=accelerator_count, + gpu_partition_size=gpu_partition_size, tpu_topology=tpu_topology, reservation_affinity_type=reservation_affinity_type, reservation_affinity_key=reservation_affinity_key, @@ -5390,6 +6113,12 @@ def deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, spot=spot, enable_access_logging=enable_access_logging, disable_container_logging=disable_container_logging, @@ -5397,6 +6126,7 @@ def deploy( deployment_resource_pool=deployment_resource_pool, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, ) def 
_should_enable_dedicated_endpoint(self, fast_tryout_enabled: bool) -> bool: @@ -5418,6 +6148,7 @@ def _deploy( max_replica_count: int = 1, accelerator_type: Optional[str] = None, accelerator_count: Optional[int] = None, + gpu_partition_size: Optional[str] = None, tpu_topology: Optional[str] = None, reservation_affinity_type: Optional[str] = None, reservation_affinity_key: Optional[str] = None, @@ -5431,6 +6162,12 @@ def _deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, spot: bool = False, enable_access_logging=False, disable_container_logging: bool = False, @@ -5440,6 +6177,7 @@ def _deploy( deployment_resource_pool: Optional[DeploymentResourcePool] = None, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, ) -> Union[Endpoint, PrivateEndpoint]: """Deploys model to endpoint. Endpoint will be created if unspecified. @@ -5489,6 +6227,8 @@ def _deploy( NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4 accelerator_count (int): Optional. The number of accelerators to attach to a worker replica. + gpu_partition_size (str): + Optional. The GPU partition Size for Nvidia MIG. tpu_topology (str): Optional. The TPU topology to use for the DeployedModel. Requireid for CloudTPU multihost deployments. @@ -5547,6 +6287,22 @@ def _deploy( Optional. Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. 
A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): + Optional. The target number of requests per minute for autoscaling. + If set, the model will be scaled based on the number of requests it receives. + autoscaling_target_dcgm_fi_dev_gpu_util (int): + Optional. Target DCGM metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): + Optional. Target vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): + Optional. Target vLLM metrics for number of inference requests + currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): + Optional. The target number of pubsub undelivered messages for autoscaling. + If set, the model will be scaled based on the pubsub queue size. + autoscaling_pubsub_subscription_labels (Dict[str, str]): + Optional. Monitored resource labels as key value pairs for + metric filtering for pubsub_num_undelivered_messages. spot (bool): Optional. Whether to schedule the deployment workload on spot VMs. enable_access_logging (bool): @@ -5570,6 +6326,14 @@ def _deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. 
Returns: endpoint (Union[Endpoint, PrivateEndpoint]): @@ -5617,6 +6381,7 @@ def _deploy( max_replica_count=max_replica_count, accelerator_type=accelerator_type, accelerator_count=accelerator_count, + gpu_partition_size=gpu_partition_size, tpu_topology=tpu_topology, reservation_affinity_type=reservation_affinity_type, reservation_affinity_key=reservation_affinity_key, @@ -5627,12 +6392,19 @@ def _deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, spot=spot, enable_access_logging=enable_access_logging, disable_container_logging=disable_container_logging, deployment_resource_pool=deployment_resource_pool, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, ) _LOGGER.log_action_completed_against_resource("model", "deployed", endpoint) diff --git a/google/cloud/aiplatform/persistent_resource.py b/google/cloud/aiplatform/persistent_resource.py index f0944a5bb4..06c1914676 100644 --- a/google/cloud/aiplatform/persistent_resource.py +++ b/google/cloud/aiplatform/persistent_resource.py @@ -171,6 +171,7 @@ def create( labels: Optional[Dict[str, str]] = None, network: Optional[str] = None, kms_key_name: Optional[str] = None, + enable_custom_service_account: Optional[bool] = None, service_account: Optional[str] = None, 
reserved_ip_ranges: List[str] = None, sync: Optional[bool] = True, # pylint: disable=unused-argument @@ -234,6 +235,16 @@ def create( PersistentResource. If set, this PersistentResource and all sub-resources of this PersistentResource will be secured by this key. + enable_custom_service_account (bool): + Optional. When set to True, allows the `service_account` + parameter to specify a custom service account for workloads on this + PersistentResource. Defaults to None (False behavior). + + If True, the service account provided in the `service_account` parameter + will be used for workloads (runtimes, jobs), provided the user has the + ``iam.serviceAccounts.actAs`` permission. If False, the + `service_account` parameter is ignored, and the PersistentResource + will use the default service account. service_account (str): Optional. Default service account that this PersistentResource's workloads run as. The workloads @@ -295,7 +306,31 @@ def create( gca_encryption_spec_compat.EncryptionSpec(kms_key_name=kms_key_name) ) - if service_account: + # Raise ValueError if enable_custom_service_account is False but + # service_account is provided + if ( + enable_custom_service_account is False and service_account is not None + ): # pylint: disable=g-bool-id-comparison + raise ValueError( + "The parameter `enable_custom_service_account` was set to False, " + "but a value was provided for `service_account`. These two " + "settings are incompatible. If you want to use a custom " + "service account, set `enable_custom_service_account` to True." 
+ ) + + elif enable_custom_service_account: + service_account_spec = gca_persistent_resource_compat.ServiceAccountSpec( + enable_custom_service_account=True, + # Set service_account if it is provided, otherwise set to None + service_account=service_account if service_account else None, + ) + gca_persistent_resource.resource_runtime_spec = ( + gca_persistent_resource_compat.ResourceRuntimeSpec( + service_account_spec=service_account_spec + ) + ) + elif service_account: + # Handle the deprecated case where only service_account is provided service_account_spec = gca_persistent_resource_compat.ServiceAccountSpec( enable_custom_service_account=True, service_account=service_account ) diff --git a/google/cloud/aiplatform/pipeline_job_schedules.py b/google/cloud/aiplatform/pipeline_job_schedules.py index 1b68b4cb93..e01173bbbb 100644 --- a/google/cloud/aiplatform/pipeline_job_schedules.py +++ b/google/cloud/aiplatform/pipeline_job_schedules.py @@ -129,6 +129,7 @@ def create( allow_queueing: bool = False, max_run_count: Optional[int] = None, max_concurrent_run_count: int = 1, + max_concurrent_active_run_count: Optional[int] = None, service_account: Optional[str] = None, network: Optional[str] = None, create_request_timeout: Optional[float] = None, @@ -155,6 +156,10 @@ def create( Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). service_account (str): Optional. Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. 
@@ -176,6 +181,7 @@ def create( allow_queueing=allow_queueing, max_run_count=max_run_count, max_concurrent_run_count=max_concurrent_run_count, + max_concurrent_active_run_count=max_concurrent_active_run_count, service_account=service_account, network=network, create_request_timeout=create_request_timeout, @@ -189,6 +195,7 @@ def _create( allow_queueing: bool = False, max_run_count: Optional[int] = None, max_concurrent_run_count: int = 1, + max_concurrent_active_run_count: Optional[int] = None, service_account: Optional[str] = None, network: Optional[str] = None, create_request_timeout: Optional[float] = None, @@ -215,6 +222,10 @@ def _create( Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). service_account (str): Optional. Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. @@ -239,6 +250,10 @@ def _create( self._gca_resource.max_run_count = max_run_count if max_concurrent_run_count: self._gca_resource.max_concurrent_run_count = max_concurrent_run_count + if max_concurrent_active_run_count: + self._gca_resource.max_concurrent_active_run_count = ( + max_concurrent_active_run_count + ) service_account = service_account or initializer.global_config.service_account network = network or initializer.global_config.network @@ -383,6 +398,7 @@ def update( allow_queueing: Optional[bool] = None, max_run_count: Optional[int] = None, max_concurrent_run_count: Optional[int] = None, + max_concurrent_active_run_count: Optional[int] = None, ) -> None: """Update an existing PipelineJobSchedule. @@ -415,6 +431,10 @@ def update( Must be positive and <= 2^63-1. 
max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). Raises: RuntimeError: User tried to call update() before create(). @@ -451,6 +471,13 @@ def update( "max_concurrent_run_count", max_concurrent_run_count, ) + if max_concurrent_active_run_count is not None: + updated_fields.append("max_concurrent_active_run_count") + setattr( + pipeline_job_schedule, + "max_concurrent_active_run_count", + max_concurrent_active_run_count, + ) update_mask = field_mask.FieldMask(paths=updated_fields) self.api_client.update_schedule( diff --git a/google/cloud/aiplatform/pipeline_jobs.py b/google/cloud/aiplatform/pipeline_jobs.py index 5fdd8fcac0..8682fcb48f 100644 --- a/google/cloud/aiplatform/pipeline_jobs.py +++ b/google/cloud/aiplatform/pipeline_jobs.py @@ -542,6 +542,7 @@ def create_schedule( allow_queueing: bool = False, max_run_count: Optional[int] = None, max_concurrent_run_count: int = 1, + max_concurrent_active_run_count: Optional[int] = None, service_account: Optional[str] = None, network: Optional[str] = None, create_request_timeout: Optional[float] = None, @@ -582,6 +583,10 @@ def create_schedule( Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). service_account (str): Optional. Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. 
@@ -616,6 +621,7 @@ def create_schedule( allow_queueing=allow_queueing, max_run_count=max_run_count, max_concurrent_run_count=max_concurrent_run_count, + max_concurrent_active_run_count=max_concurrent_active_run_count, service_account=service_account, network=network, create_request_timeout=create_request_timeout, @@ -868,15 +874,19 @@ def list( Supported fields: `display_name`, `create_time`, `update_time` enable_simple_view (bool): Optional. Whether to pass the `read_mask` parameter to the list call. - Defaults to False if not provided. This will improve the performance of calling - list(). However, the returned PipelineJob list will not include all fields for - each PipelineJob. Setting this to True will exclude the following fields in your - response: `runtime_config`, `service_account`, `network`, and some subfields of - `pipeline_spec` and `job_detail`. The following fields will be included in - each PipelineJob resource in your response: `state`, `display_name`, - `pipeline_spec.pipeline_info`, `create_time`, `start_time`, `end_time`, - `update_time`, `labels`, `template_uri`, `template_metadata.version`, - `job_detail.pipeline_run_context`, `job_detail.pipeline_context`. + Defaults to `False` if not provided. This improves the performance of + calling the `list()` method. It's recommended to set this parameter to + `True` if your region has multiple pipeline runs, which can result in + high latency and errors. However, the returned pipeline job list + doesn't include all fields for each `PipelineJob`. + Setting this to `True` excludes the following fields from your response: + `runtime_config`, `service_account`, `network`, and some subfields of + `pipeline_spec` and `job_detail`. 
The following + fields are included in each `PipelineJob` resource in your response: + `state`, `display_name`, `pipeline_spec.pipeline_info`, `create_time`, + `start_time`, `end_time`, `update_time`, `labels`, `template_uri`, + `template_metadata.version`, `job_detail.pipeline_run_context`, and + `job_detail.pipeline_context`. project (str): Optional. Project to retrieve list from. If not set, project set in aiplatform.init will be used. @@ -1014,9 +1024,9 @@ def _query_experiment_row( row.params = {} for key, value in system_run_executions[0].metadata.items(): if key.startswith(metadata_constants.PIPELINE_PARAM_PREFIX): - row.params[ - key[len(metadata_constants.PIPELINE_PARAM_PREFIX) :] - ] = value + row.params[key[len(metadata_constants.PIPELINE_PARAM_PREFIX) :]] = ( + value + ) row.state = system_run_executions[0].state.name for metric_artifact in metric_artifacts: @@ -1277,7 +1287,7 @@ def from_pipeline_func( job_id = job_id or re.sub( r"[^-a-z0-9]", "-", automatic_display_name.lower() ).strip("-") - pipeline_file = tempfile.mktemp(suffix=".json") + pipeline_file = tempfile.mkstemp(suffix=".json") compiler_v2.Compiler().compile( pipeline_func=pipeline_func, pipeline_name=context_name, diff --git a/google/cloud/aiplatform/prediction/local_model.py b/google/cloud/aiplatform/prediction/local_model.py index 313c68c58b..23543bda26 100644 --- a/google/cloud/aiplatform/prediction/local_model.py +++ b/google/cloud/aiplatform/prediction/local_model.py @@ -246,6 +246,7 @@ def build_cpr_model( requirements_path: Optional[str] = None, extra_packages: Optional[List[str]] = None, no_cache: bool = False, + platform: Optional[str] = None, ) -> "LocalModel": """Builds a local model from a custom predictor. 
@@ -274,6 +275,7 @@ def build_cpr_model( predictor=$CUSTOM_PREDICTOR_CLASS, requirements_path="./user_src_dir/requirements.txt", extra_packages=["./user_src_dir/user_code/custom_package.tar.gz"], + platform="linux/amd64", # i.e., if you're building on a non-x86 machine ) In the built image, user provided files will be copied as follows: @@ -340,6 +342,10 @@ def build_cpr_model( reduces the image building time. See https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#leverage-build-cache for more details. + platform (str): + Optional. The target platform for the Docker image build. See + https://docs.docker.com/build/building/multi-platform/#building-multi-platform-images + for more details. Returns: local model: Instantiated representation of the local model. @@ -391,6 +397,7 @@ def build_cpr_model( pip_command="pip3" if is_prebuilt_prediction_image else "pip", python_command="python3" if is_prebuilt_prediction_image else "python", no_cache=no_cache, + platform=platform, ) container_spec = gca_model_compat.ModelContainerSpec( diff --git a/google/cloud/aiplatform/prediction/predictor.py b/google/cloud/aiplatform/prediction/predictor.py index 0560b97d30..996ffd2150 100644 --- a/google/cloud/aiplatform/prediction/predictor.py +++ b/google/cloud/aiplatform/prediction/predictor.py @@ -40,12 +40,15 @@ def __init__(self): return @abstractmethod - def load(self, artifacts_uri: str) -> None: + def load(self, artifacts_uri: str, **kwargs) -> None: """Loads the model artifact. Args: artifacts_uri (str): Required. The value of the environment variable AIP_STORAGE_URI. + **kwargs: + Optional. Additional keyword arguments for security or + configuration (e.g., allowed_extensions). 
""" pass diff --git a/google/cloud/aiplatform/prediction/serializer.py b/google/cloud/aiplatform/prediction/serializer.py index 8016e46115..569dcda6aa 100644 --- a/google/cloud/aiplatform/prediction/serializer.py +++ b/google/cloud/aiplatform/prediction/serializer.py @@ -27,7 +27,9 @@ 'Please install the SDK using `pip install "google-cloud-aiplatform[prediction]>=1.16.0"`.' ) -from google.cloud.aiplatform.constants import prediction as prediction_constants +from google.cloud.aiplatform.constants import ( + prediction as prediction_constants, +) from google.cloud.aiplatform.prediction import handler_utils diff --git a/google/cloud/aiplatform/prediction/sklearn/__init__.py b/google/cloud/aiplatform/prediction/sklearn/__init__.py index d7144d1426..4d35c0f20e 100644 --- a/google/cloud/aiplatform/prediction/sklearn/__init__.py +++ b/google/cloud/aiplatform/prediction/sklearn/__init__.py @@ -15,6 +15,8 @@ # limitations under the License. # -from google.cloud.aiplatform.prediction.sklearn.predictor import SklearnPredictor +from google.cloud.aiplatform.prediction.sklearn.predictor import ( + SklearnPredictor, +) __all__ = ("SklearnPredictor",) diff --git a/google/cloud/aiplatform/prediction/sklearn/predictor.py b/google/cloud/aiplatform/prediction/sklearn/predictor.py index e5f3d96497..154458d1d8 100644 --- a/google/cloud/aiplatform/prediction/sklearn/predictor.py +++ b/google/cloud/aiplatform/prediction/sklearn/predictor.py @@ -19,6 +19,7 @@ import numpy as np import os import pickle +import warnings from google.cloud.aiplatform.constants import prediction from google.cloud.aiplatform.utils import prediction_utils @@ -31,21 +32,57 @@ class SklearnPredictor(Predictor): def __init__(self): return - def load(self, artifacts_uri: str) -> None: + def load(self, artifacts_uri: str, **kwargs) -> None: """Loads the model artifact. Args: artifacts_uri (str): Required. The value of the environment variable AIP_STORAGE_URI. + **kwargs: + Optional. 
Additional keyword arguments for security or + configuration. Supported arguments: + allowed_extensions (list[str]): + The allowed file extensions for model artifacts. + If not provided, a UserWarning is issued. Raises: ValueError: If there's no required model files provided in the artifacts uri. """ + + allowed_extensions = kwargs.get("allowed_extensions", None) + + if allowed_extensions is None: + warnings.warn( + "No 'allowed_extensions' provided. Loading model artifacts from " + "untrusted sources may lead to remote code execution.", + UserWarning, + ) + prediction_utils.download_model_artifacts(artifacts_uri) - if os.path.exists(prediction.MODEL_FILENAME_JOBLIB): + if os.path.exists( + prediction.MODEL_FILENAME_JOBLIB + ) and prediction_utils.is_extension_allowed( + filename=prediction.MODEL_FILENAME_JOBLIB, + allowed_extensions=allowed_extensions, + ): + warnings.warn( + f"Loading {prediction.MODEL_FILENAME_JOBLIB} using joblib pickle, which is unsafe. " + "Only load files from trusted sources.", + RuntimeWarning, + ) self._model = joblib.load(prediction.MODEL_FILENAME_JOBLIB) - elif os.path.exists(prediction.MODEL_FILENAME_PKL): + elif os.path.exists( + prediction.MODEL_FILENAME_PKL + ) and prediction_utils.is_extension_allowed( + filename=prediction.MODEL_FILENAME_PKL, + allowed_extensions=allowed_extensions, + ): + warnings.warn( + f"Loading {prediction.MODEL_FILENAME_PKL} using pickle, which is unsafe. " + "Only load files from trusted sources.", + RuntimeWarning, + ) self._model = pickle.load(open(prediction.MODEL_FILENAME_PKL, "rb")) else: valid_filenames = [ @@ -53,7 +90,7 @@ def load(self, artifacts_uri: str) -> None: prediction.MODEL_FILENAME_PKL, ] raise ValueError( - f"One of the following model files must be provided: {valid_filenames}." + f"One of the following model files must be provided and allowed: {valid_filenames}." 
) def preprocess(self, prediction_input: dict) -> np.ndarray: diff --git a/google/cloud/aiplatform/prediction/xgboost/__init__.py b/google/cloud/aiplatform/prediction/xgboost/__init__.py index 53cd28bb6c..d65b388d35 100644 --- a/google/cloud/aiplatform/prediction/xgboost/__init__.py +++ b/google/cloud/aiplatform/prediction/xgboost/__init__.py @@ -15,6 +15,8 @@ # limitations under the License. # -from google.cloud.aiplatform.prediction.xgboost.predictor import XgboostPredictor +from google.cloud.aiplatform.prediction.xgboost.predictor import ( + XgboostPredictor, +) __all__ = ("XgboostPredictor",) diff --git a/google/cloud/aiplatform/prediction/xgboost/predictor.py b/google/cloud/aiplatform/prediction/xgboost/predictor.py index 005efcb129..fbb5911d8f 100644 --- a/google/cloud/aiplatform/prediction/xgboost/predictor.py +++ b/google/cloud/aiplatform/prediction/xgboost/predictor.py @@ -19,6 +19,7 @@ import logging import os import pickle +import warnings import numpy as np import xgboost as xgb @@ -34,21 +35,52 @@ class XgboostPredictor(Predictor): def __init__(self): return - def load(self, artifacts_uri: str) -> None: + def load(self, artifacts_uri: str, **kwargs) -> None: """Loads the model artifact. Args: artifacts_uri (str): Required. The value of the environment variable AIP_STORAGE_URI. + **kwargs: + Optional. Additional keyword arguments for security or + configuration. Supported arguments: + allowed_extensions (list[str]): + The allowed file extensions for model artifacts. + If not provided, a UserWarning is issued. Raises: ValueError: If there's no required model files provided in the artifacts uri. """ + allowed_extensions = kwargs.get("allowed_extensions", None) + + if allowed_extensions is None: + warnings.warn( + "No 'allowed_extensions' provided. 
Loading model artifacts from " + "untrusted sources may lead to remote code execution.", + UserWarning, + ) + prediction_utils.download_model_artifacts(artifacts_uri) - if os.path.exists(prediction.MODEL_FILENAME_BST): + + if os.path.exists( + prediction.MODEL_FILENAME_BST + ) and prediction_utils.is_extension_allowed( + filename=prediction.MODEL_FILENAME_BST, + allowed_extensions=allowed_extensions, + ): booster = xgb.Booster(model_file=prediction.MODEL_FILENAME_BST) - elif os.path.exists(prediction.MODEL_FILENAME_JOBLIB): + elif os.path.exists( + prediction.MODEL_FILENAME_JOBLIB + ) and prediction_utils.is_extension_allowed( + filename=prediction.MODEL_FILENAME_JOBLIB, + allowed_extensions=allowed_extensions, + ): + warnings.warn( + f"Loading {prediction.MODEL_FILENAME_JOBLIB} using joblib pickle, which is unsafe. " + "Only load files from trusted sources.", + RuntimeWarning, + ) try: booster = joblib.load(prediction.MODEL_FILENAME_JOBLIB) except KeyError: @@ -58,7 +90,17 @@ def load(self, artifacts_uri: str) -> None: ) booster = xgb.Booster() booster.load_model(prediction.MODEL_FILENAME_JOBLIB) - elif os.path.exists(prediction.MODEL_FILENAME_PKL): + elif os.path.exists( + prediction.MODEL_FILENAME_PKL + ) and prediction_utils.is_extension_allowed( + filename=prediction.MODEL_FILENAME_PKL, + allowed_extensions=allowed_extensions, + ): + warnings.warn( + f"Loading {prediction.MODEL_FILENAME_PKL} using pickle, which is unsafe. " + "Only load files from trusted sources.", + RuntimeWarning, + ) booster = pickle.load(open(prediction.MODEL_FILENAME_PKL, "rb")) else: valid_filenames = [ @@ -67,7 +109,7 @@ def load(self, artifacts_uri: str) -> None: prediction.MODEL_FILENAME_PKL, ] raise ValueError( - f"One of the following model files must be provided: {valid_filenames}." + f"One of the following model files must be provided and allowed: {valid_filenames}." 
) self._booster = booster diff --git a/google/cloud/aiplatform/preview/datasets.py b/google/cloud/aiplatform/preview/datasets.py new file mode 100644 index 0000000000..1616abb991 --- /dev/null +++ b/google/cloud/aiplatform/preview/datasets.py @@ -0,0 +1,1643 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import dataclasses +import io +import json +from typing import Dict, List, Optional, Tuple +import uuid + +from google.auth import credentials as auth_credentials +from google.cloud import storage +from google.cloud.aiplatform import base +from google.cloud.aiplatform import compat +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils +from google.cloud.aiplatform.compat.types import ( + dataset_v1beta1 as gca_dataset, + dataset_service_v1beta1 as gca_dataset_service, +) +from vertexai import generative_models +from vertexai.generative_models import _generative_models +from vertexai.preview import prompts + +from google.protobuf import field_mask_pb2 +from google.protobuf import struct_pb2 +from google.protobuf import json_format + + +_MULTIMODAL_METADATA_SCHEMA_URI = ( + "gs://google-cloud-aiplatform/schema/dataset/metadata/multimodal_1.0.0.yaml" +) +_DEFAULT_BQ_DATASET_PREFIX = "vertex_datasets" +_DEFAULT_BQ_TABLE_PREFIX = "multimodal_dataset" +_INPUT_CONFIG_FIELD = "inputConfig" +_BIGQUERY_SOURCE_FIELD = "bigquerySource" +_URI_FIELD = "uri" 
+_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD = "geminiTemplateConfigSource" +_GEMINI_TEMPLATE_CONFIG_FIELD = "geminiTemplateConfig" +_PROMPT_URI_FIELD = "promptUri" +_REQUEST_COLUMN_NAME_FIELD = "requestColumnName" +_BQ_MULTIREGIONS = {"us", "eu"} + +_LOGGER = base.Logger(__name__) + + +def _try_import_bigframes(): + """Try to import `bigframes` and return it if successful - otherwise raise an import error.""" + try: + import bigframes + import bigframes.pandas + import bigframes.bigquery + + return bigframes + except ImportError as exc: + raise ImportError( + "`bigframes` is not installed but required for this functionality." + ) from exc + + +def _get_metadata_for_bq( + *, + bq_uri: str, + template_config: Optional[gca_dataset_service.GeminiTemplateConfig] = None, + prompt_uri: Optional[str] = None, + request_column_name: Optional[str] = None, +) -> struct_pb2.Value: + if ( + sum( + 1 + for param in (template_config, prompt_uri, request_column_name) + if param is not None + ) + > 1 + ): + raise ValueError( + "Only one of template_config, prompt_uri, request_column_name can be specified." 
+ ) + + input_config = {_INPUT_CONFIG_FIELD: {_BIGQUERY_SOURCE_FIELD: {_URI_FIELD: bq_uri}}} + if template_config is not None: + template_config_dict = gca_dataset_service.GeminiTemplateConfig.to_dict( + template_config + ) + input_config[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] = { + _GEMINI_TEMPLATE_CONFIG_FIELD: template_config_dict + } + if prompt_uri is not None: + input_config[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] = { + _PROMPT_URI_FIELD: prompt_uri + } + if request_column_name is not None: + input_config[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] = { + _REQUEST_COLUMN_NAME_FIELD: request_column_name + } + return json_format.ParseDict(input_config, struct_pb2.Value()) + + +def _bq_dataset_location_allowed( + vertex_location: str, bq_dataset_location: str +) -> bool: + if bq_dataset_location == vertex_location: + return True + if bq_dataset_location in _BQ_MULTIREGIONS: + return vertex_location.startswith(bq_dataset_location) + return False + + +def _normalize_and_validate_table_id( + *, + table_id: str, + project: Optional[str] = None, + vertex_location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, +): + from google.cloud import bigquery # pylint: disable=g-import-not-at-top + + if not project: + project = initializer.global_config.project + if not vertex_location: + vertex_location = initializer.global_config.location + if not credentials: + credentials = initializer.global_config.credentials + + table_ref = bigquery.TableReference.from_string(table_id, default_project=project) + if table_ref.project != project: + raise ValueError( + f"The BigQuery table " + f"`{table_ref.project}.{table_ref.dataset_id}.{table_ref.table_id}`" + " must be in the same project as the multimodal dataset." + f" The multimodal dataset is in `{project}`, but the BigQuery table" + f" is in `{table_ref.project}`." 
+ ) + + dataset_ref = bigquery.DatasetReference( + project=table_ref.project, dataset_id=table_ref.dataset_id + ) + client = bigquery.Client(project=project, credentials=credentials) + bq_dataset = client.get_dataset(dataset_ref=dataset_ref) + if not _bq_dataset_location_allowed(vertex_location, bq_dataset.location): + raise ValueError( + f"The BigQuery dataset" + f" `{dataset_ref.project}.{dataset_ref.dataset_id}` must be in the" + " same location as the multimodal dataset. The multimodal dataset" + f" is in `{vertex_location}`, but the BigQuery dataset is in" + f" `{bq_dataset.location}`." + ) + return f"{table_ref.project}.{table_ref.dataset_id}.{table_ref.table_id}" + + +def _create_default_bigquery_dataset_if_not_exists( + *, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, +) -> str: + # Loading bigquery lazily to avoid auto-loading it when importing vertexai + from google.cloud import bigquery # pylint: disable=g-import-not-at-top + + if not project: + project = initializer.global_config.project + if not location: + location = initializer.global_config.location + if not credentials: + credentials = initializer.global_config.credentials + + bigquery_client = bigquery.Client(project=project, credentials=credentials) + location_str = location.lower().replace("-", "_") + dataset_id = bigquery.DatasetReference( + project, f"{_DEFAULT_BQ_DATASET_PREFIX}_{location_str}" + ) + dataset = bigquery.Dataset(dataset_ref=dataset_id) + dataset.location = location + bigquery_client.create_dataset(dataset, exists_ok=True) + return f"{dataset_id.project}.{dataset_id.dataset_id}" + + +def _generate_target_table_id(dataset_id: str): + return f"{dataset_id}.{_DEFAULT_BQ_TABLE_PREFIX}_{str(uuid.uuid4())}" + + +def construct_single_turn_template( + *, + prompt: str = None, + response: Optional[str] = None, + system_instruction: Optional[str] = None, + model: Optional[str] = None, + cached_content: 
Optional[str] = None, + tools: Optional[List[generative_models.Tool]] = None, + tool_config: Optional[generative_models.ToolConfig] = None, + safety_settings: Optional[List[generative_models.SafetySetting]] = None, + generation_config: Optional[generative_models.GenerationConfig] = None, + field_mapping: List[Dict[str, str]] = None, +) -> "GeminiTemplateConfig": + """Constructs a GeminiTemplateConfig object for single-turn cases. + + Example: + template_config = dataset.construct_single_turn_template( + prompt = "Which flower is this {flower_image} ?", + response="This is a {label}.", + system_instruction="You are a botanical classifier." + ) + + Args: + + prompt (str): + Required. User input. + response (str): + Optional. Model response to user input. + system_instruction (str): + Optional. System instructions for the model. + model (str): + Optional. The model to use for the GeminiExample. + cached_content (str): + Optional. The cached content to use for the GeminiExample. + tools (List[Tool]): + Optional. The tools to use for the GeminiExample. + tool_config (ToolConfig): + Optional. The tool config to use for the GeminiExample. + safety_settings (List[SafetySetting]): + Optional. The safety settings to use for the GeminiExample. + generation_config (GenerationConfig): + Optional. The generation config to use for the GeminiExample. + field_mapping (List[Dict[str, str]]): + Optional. Mapping of placeholders to dataset columns. + + Returns: + A GeminiTemplateConfig object. + """ + contents = [] + contents.append( + generative_models.Content( + role="user", + parts=[ + generative_models.Part.from_text(prompt), + ], + ) + ) + if response: + contents.append( + generative_models.Content( + role="model", + parts=[ + generative_models.Part.from_text(response), + ], + ) + ) + if system_instruction: + system_instruction = generative_models.Content( + parts=[ + generative_models.Part.from_text(system_instruction), + ], + ) + + # Set up GeminiExample. 
+ gemini_example = GeminiExample( + model=model, + contents=contents, + system_instruction=system_instruction, + cached_content=cached_content, + tools=tools, + tool_config=tool_config, + safety_settings=safety_settings, + generation_config=generation_config, + ) + return GeminiTemplateConfig( + gemini_example=gemini_example, field_mapping=field_mapping + ) + + +class GeminiExample: + """A class representing a Gemini example.""" + + Content = generative_models.Content + Part = generative_models.Part + Tool = generative_models.Tool + ToolConfig = generative_models.ToolConfig + SafetySetting = generative_models.SafetySetting + GenerationConfig = generative_models.GenerationConfig + + def __init__( + self, + *, + model: Optional[str] = None, + contents: Optional[List[Content]] = None, + system_instruction: Optional[Content] = None, + cached_content: Optional[str] = None, + tools: Optional[List[Tool]] = None, + tool_config: Optional[ToolConfig] = None, + safety_settings: Optional[List[SafetySetting]] = None, + generation_config: Optional[GenerationConfig] = None, + ): + """Initializes a GeminiExample. + + Args: + model (str): + Optional. The model to use for the GeminiExample. + contents (List[Content]): + Optional. The contents to use for the GeminiExample. + system_instruction (Content): + Optional. The system instruction to use for the GeminiExample. + cached_content (str): + Optional. The cached content to use for the GeminiExample. + tools (List[Tool]): + Optional. The tools to use for the GeminiExample. + tool_config (ToolConfig): + Optional. The tool config to use for the GeminiExample. + safety_settings (List[SafetySetting]): + Optional. The safety settings to use for the GeminiExample. + generation_config (GenerationConfig): + Optional. The generation config to use for the GeminiExample. 
+ """ + self._raw_gemini_example = gca_dataset_service.GeminiExample() + self.model = model + self.contents = contents + self.system_instruction = system_instruction + self.cached_content = cached_content + self.tools = tools + self.tool_config = tool_config + self.safety_settings = safety_settings + self.generation_config = generation_config + + @property + def model(self) -> Optional[str]: + """The model to use for the GeminiExample.""" + if not self._raw_gemini_example.model: + return None + return self._raw_gemini_example.model + + @model.setter + def model(self, model: str): + """Setter for the model.""" + self._raw_gemini_example.model = model + + @property + def contents(self) -> Optional[List[Content]]: + """The contents of the GeminiExample.""" + if not self._raw_gemini_example.contents: + return None + return [ + generative_models.Content._from_gapic(content) + for content in self._raw_gemini_example.contents + ] + + @contents.setter + def contents(self, contents: Optional[List[Content]]): + """Setter for the contents.""" + if contents is None: + self._raw_gemini_example.contents = None + else: + self._raw_gemini_example.contents = [ + content._raw_content for content in contents + ] + + @property + def system_instruction(self) -> Optional[Content]: + """The system instruction of the GeminiExample.""" + if not self._raw_gemini_example.system_instruction: + return None + return generative_models.Content._from_gapic( + self._raw_gemini_example.system_instruction + ) + + @system_instruction.setter + def system_instruction(self, system_instruction: Optional[Content]): + """Setter for the system instruction.""" + if system_instruction is None: + self._raw_gemini_example.system_instruction = None + else: + self._raw_gemini_example.system_instruction = ( + system_instruction._raw_content + ) + + @property + def cached_content(self) -> Optional[str]: + """The cached content of the GeminiExample.""" + if not self._raw_gemini_example.cached_content: + return None + 
return self._raw_gemini_example.cached_content + + @cached_content.setter + def cached_content(self, cached_content: Optional[str]): + """Setter for the cached content.""" + self._raw_gemini_example.cached_content = cached_content + + @property + def tools(self) -> Optional[List[Tool]]: + """The tools of the GeminiExample.""" + if not self._raw_gemini_example.tools: + return None + return [ + generative_models.Tool._from_gapic(tool) + for tool in self._raw_gemini_example.tools + ] + + @tools.setter + def tools(self, tools: Optional[List[Tool]]): + """Setter for the tools.""" + if tools is None: + self._raw_gemini_example.tools = None + else: + self._raw_gemini_example.tools = [tool._raw_tool for tool in tools] + + @property + def tool_config(self) -> Optional[ToolConfig]: + """The tool config of the GeminiExample.""" + if not self._raw_gemini_example.tool_config: + return None + return generative_models.ToolConfig._from_gapic( + self._raw_gemini_example.tool_config + ) + + @tool_config.setter + def tool_config(self, tool_config: Optional[ToolConfig]): + """Setter for the tool config.""" + if tool_config is None: + self._raw_gemini_example.tool_config = None + else: + self._raw_gemini_example.tool_config = tool_config._gapic_tool_config + + @property + def safety_settings(self) -> Optional[List[SafetySetting]]: + """The safety settings of the GeminiExample.""" + if not self._raw_gemini_example.safety_settings: + return None + return [ + generative_models.SafetySetting._from_gapic(safety_setting) + for safety_setting in self._raw_gemini_example.safety_settings + ] + + @safety_settings.setter + def safety_settings(self, safety_settings: Optional[List[SafetySetting]]): + """Setter for the safety settings.""" + if safety_settings is None: + self._raw_gemini_example.safety_settings = None + else: + self._raw_gemini_example.safety_settings = [ + safety_setting._raw_safety_setting for safety_setting in safety_settings + ] + + @property + def generation_config(self) -> 
Optional[GenerationConfig]: + """The generation config of the GeminiExample.""" + if not self._raw_gemini_example.generation_config: + return None + return generative_models.GenerationConfig._from_gapic( + self._raw_gemini_example.generation_config + ) + + @generation_config.setter + def generation_config(self, generation_config: Optional[GenerationConfig]): + """Setter for the generation config.""" + if generation_config is None: + self._raw_gemini_example.generation_config = None + else: + self._raw_gemini_example.generation_config = ( + generation_config._raw_generation_config + ) + + @classmethod + def _from_gapic( + cls, raw_gemini_example: gca_dataset_service.GeminiExample + ) -> "GeminiExample": + example = cls() + example._raw_gemini_example = raw_gemini_example + return example + + @classmethod + def from_prompt(cls, prompt: prompts.Prompt) -> "GeminiExample": + """Creates a GeminiExample from a Prompt. + + Args: + prompt (prompts.Prompt): + The Prompt to use for the GeminiExample. + Returns: + A GeminiExample created from the Prompt. + """ + contents = prompt.assemble_contents() + if prompt.system_instruction: + system_instructions = generative_models.Content._from_gapic( + _generative_models._to_content(prompt.system_instruction) + ) + else: + system_instructions = None + # TODO(b/404208669): Prompt management SDK has a wrong type annotation + # for safety_settings: It's annotated as SafetySetting, but the + # validation assumes it's a sequence. 
+ if isinstance(prompt.safety_settings, generative_models.SafetySetting): + safety_settings = [prompt.safety_settings] + else: + safety_settings = prompt.safety_settings + + return cls( + model=prompt.model_name, + contents=contents, + system_instruction=system_instructions, + tools=prompt.tools, + tool_config=prompt.tool_config, + safety_settings=safety_settings, + generation_config=prompt.generation_config, + ) + + def __repr__(self) -> str: + return self._raw_gemini_example.__repr__() + + +class GeminiTemplateConfig: + """A class representing a Gemini template config. + + A Gemini template config contains a GeminiExample, which specifies the + prompt including placeholders, and a field mapping, which specifies how to + map the placeholders to the corresponding column in the BigQuery table of + the dataset. If no field mapping is provided, the default behavior is to + use the placeholder name as the column name. + """ + + def __init__( + self, + *, + gemini_example: Optional[GeminiExample] = None, + field_mapping: Optional[Dict[str, str]] = None, + ): + """Initializes a GeminiTemplateConfig. + + Args: + gemini_example (GeminiExample): + Optional. The GeminiExample to use for the template config. If + not provided, a default GeminiExample will be used. + field_mapping (Dict[str, str]): + Optional. The field mapping to use for the template config. If + not provided, all placeholders in the GeminiExample will be + mapped to the corresponding column in the BigQuery table of the + dataset. 
+ """ + raw_gemini_example = ( + gemini_example._raw_gemini_example if gemini_example is not None else None + ) + self._raw_gemini_template_config = gca_dataset_service.GeminiTemplateConfig( + gemini_example=raw_gemini_example, field_mapping=field_mapping + ) + + @classmethod + def _from_gapic( + cls, raw_gemini_template_config: gca_dataset_service.GeminiTemplateConfig + ) -> None: + template_config = cls() + template_config._raw_gemini_template_config = raw_gemini_template_config + return template_config + + @property + def gemini_example(self) -> Optional[GeminiExample]: + """The GeminiExample of this template config.""" + return GeminiExample._from_gapic( + self._raw_gemini_template_config.gemini_example + ) + + @property + def field_mapping(self) -> Optional[Dict[str, str]]: + """The field mapping of this template config.""" + return dict(self._raw_gemini_template_config.field_mapping) + + def __repr__(self) -> str: + return self._raw_gemini_template_config.__repr__() + + +@dataclasses.dataclass(frozen=True) +class TuningResourceUsageAssessmentResult: + """The result of a tuning resource usage assessment. + + Attributes: + token_count (int): + The number of tokens in the dataset. + billable_character_count (int): + The number of billable characters in the dataset. + """ + + token_count: int + billable_character_count: int + + +@dataclasses.dataclass(frozen=True) +class TuningValidationAssessmentResult: + """The result of a tuning validation assessment. + + Attributes: + errors (List[str]): + The list of errors found in the dataset. + """ + + errors: List[str] + + +@dataclasses.dataclass(frozen=True) +class BatchPredictionResourceUsageAssessmentResult: + """The result of a batch prediction resource usage assessment. + + Attributes: + token_count (int): + Number of tokens in the dataset. + audio_token_count (int): + Number of audio tokens in the dataset. 
+ """ + + token_count: int + audio_token_count: int + + +class MultimodalDataset(base.VertexAiResourceNounWithFutureManager): + """A class representing a unified multimodal dataset.""" + + client_class = utils.DatasetClientWithOverride + _resource_noun = "datasets" + _getter_method = "get_dataset" + _list_method = "list_datasets" + _delete_method = "delete_dataset" + _parse_resource_name_method = "parse_dataset_path" + _format_resource_name_method = "dataset_path" + _DEFAULT_REQUEST_COLUMN_NAME = "requests" + + def __init__( + self, + *, + dataset_name: str, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + ): + """Retrieves an existing multimodal dataset given a resource name. + + Args: + dataset_name (str): + Required. A fully-qualified dataset resource name or dataset ID. + Example: "projects/123/locations/us-central1/datasets/456" or + "456" when project and location are initialized or passed. + project (str): + Optional project to retrieve dataset from. If not set, project + set in aiplatform.init will be used. + location (str): + Optional location to retrieve dataset from. If not set, location + set in aiplatform.init will be used. + credentials (auth_credentials.Credentials): + Custom credentials to use to retrieve this Dataset. Overrides + credentials set in aiplatform.init. 
+ """ + super().__init__( + project=project, + location=location, + credentials=credentials, + resource_name=dataset_name, + ) + self.api_client = self.api_client.select_version(compat.V1BETA1) + self._gca_resource = self._get_gca_resource(resource_name=dataset_name) + self._validate_metadata_schema_uri() + + @property + def metadata_schema_uri(self) -> str: + """The metadata schema uri of this dataset resource.""" + self._assert_gca_resource_is_available() + return self._gca_resource.metadata_schema_uri + + def _validate_metadata_schema_uri(self): + if self.metadata_schema_uri != _MULTIMODAL_METADATA_SCHEMA_URI: + + raise ValueError( + f"Dataset {self.resource_name} is not a multimodal dataset" + ) + + @property + def bigquery_table(self) -> str: + """The BigQuery table of this dataset resource, such as + "bq://project.dataset.table".""" + self._assert_gca_resource_is_available() + return self._gca_resource.metadata[_INPUT_CONFIG_FIELD][_BIGQUERY_SOURCE_FIELD][ + _URI_FIELD + ] + + @classmethod + def from_bigquery( + cls, + *, + bigquery_source: str, + display_name: Optional[str] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + labels: Optional[Dict[str, str]] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> "MultimodalDataset": + """Creates a multimodal dataset from a BigQuery table. + + Args: + bigquery_source (str): + Required. The BigQuery table URI or ID to be used for the created + dataset, which can be in the format of "bq://dataset.table", + "bq://project.dataset.table" or "project.dataset.table". + display_name (str): + Optional. The user-defined name of the dataset. The name can be + up to 128 characters long and can consist of any UTF-8 + characters. + project (str): + Optional. Project to create this dataset in. Overrides project + set in aiplatform.init. + location (str): + Optional. Location to create this dataset in. 
Overrides location + set in aiplatform.init. + credentials (auth_credentials.Credentials): + Optional. Custom credentials to use to create this dataset. + Overrides credentials set in aiplatform.init. + labels (Dict[str, str]): + Optional. The labels with user-defined metadata to organize your + datasets. Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. + International characters are allowed. See https://goo.gl/xmQnxf + for more information on and examples of labels. No more than 64 + user labels can be associated with one dataset (System labels + are excluded). System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + sync (bool): + Optional. Whether to execute this method synchronously. If + False, this method will be executed in concurrent Future and any + downstream object will be immediately returned and synced when + the Future has completed. + create_request_timeout (float): + Optional. The timeout for the dataset creation request. + + Returns: + dataset (MultimodalDataset): + The created multimodal dataset. 
+ """ + if not bigquery_source.startswith("bq://"): + bigquery_source = f"bq://{bigquery_source}" + return cls._create_from_bigquery( + metadata=_get_metadata_for_bq(bq_uri=bigquery_source), + display_name=display_name, + project=project, + location=location, + credentials=credentials, + labels=labels, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + @classmethod + def from_pandas( + cls, + *, + dataframe: "pandas.DataFrame", # type: ignore # noqa: F821 + target_table_id: Optional[str] = None, + display_name: Optional[str] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + labels: Optional[Dict[str, str]] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> "MultimodalDataset": + """Creates a multimodal dataset from a pandas dataframe. + + Args: + dataframe (pandas.DataFrame): + The pandas dataframe to be used for the created dataset. + target_table_id (str): + Optional. The BigQuery table id where the dataframe will be + uploaded. The table id can be in the format of "dataset.table" + or "project.dataset.table". If a table already exists with the + given table id, it will be overwritten. Note that the BigQuery + dataset must already exist and be in the same location as the + multimodal dataset. If not provided, a generated table id will + be created in the `vertex_datasets` dataset (e.g. + `project.vertex_datasets_us_central1.multimodal_dataset_4cbf7ffd`). + display_name (str): + Optional. The user-defined name of the dataset. The name can be + up to 128 characters long and can consist of any UTF-8 + characters. + project (str): + Optional. Project to create this dataset in. Overrides project + set in aiplatform.init. + location (str): + Optional. Location to create this dataset in. Overrides location + set in aiplatform.init. + credentials (auth_credentials.Credentials): + Optional. 
Custom credentials to use to create this dataset. + Overrides credentials set in aiplatform.init. + labels (Dict[str, str]): + Optional. The labels with user-defined metadata to organize your + datasets. Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. + International characters are allowed. See https://goo.gl/xmQnxf + for more information on and examples of labels. No more than 64 + user labels can be associated with one dataset (System labels + are excluded). System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + sync (bool): + Optional. Whether to execute this method synchronously. If + False, this method will be executed in concurrent Future and any + downstream object will be immediately returned and synced when + the Future has completed. + create_request_timeout (float): + Optional. The timeout for the dataset creation request. + + Returns: + dataset (MultimodalDataset): + The created multimodal dataset. 
+ """ + bigframes = _try_import_bigframes() + from google.cloud import bigquery # pylint: disable=g-import-not-at-top + + if not project: + project = initializer.global_config.project + if not location: + location = initializer.global_config.location + if not credentials: + credentials = initializer.global_config.credentials + + if target_table_id: + target_table_id = _normalize_and_validate_table_id( + table_id=target_table_id, + project=project, + vertex_location=location, + credentials=credentials, + ) + else: + dataset_id = _create_default_bigquery_dataset_if_not_exists( + project=project, location=location, credentials=credentials + ) + target_table_id = _generate_target_table_id(dataset_id) + + session_options = bigframes.BigQueryOptions( + credentials=credentials, + project=project, + location=location, + ) + with bigframes.connect(session_options) as session: + temp_bigframes_df = session.read_pandas(dataframe) + temp_table_id = temp_bigframes_df.to_gbq() + client = bigquery.Client(project=project, credentials=credentials) + copy_job = client.copy_table( + sources=temp_table_id, + destination=target_table_id, + ) + copy_job.result() + + bigquery_uri = f"bq://{target_table_id}" + return cls._create_from_bigquery( + metadata=_get_metadata_for_bq(bq_uri=bigquery_uri), + display_name=display_name, + project=project, + location=location, + credentials=credentials, + labels=labels, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + @classmethod + def from_bigframes( + cls, + *, + dataframe: "bigframes.pandas.DataFrame", # type: ignore # noqa: F821 + target_table_id: Optional[str] = None, + display_name: Optional[str] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + labels: Optional[Dict[str, str]] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> "MultimodalDataset": + """Creates a multimodal dataset from a bigframes 
dataframe. + + Args: + dataframe (bigframes.pandas.DataFrame): + The BigFrames dataframe that will be used for the created + dataset. + target_table_id (str): + Optional. The BigQuery table id where the dataframe will be + uploaded. The table id can be in the format of "dataset.table" + or "project.dataset.table". If a table already exists with the + given table id, it will be overwritten. Note that the BigQuery + dataset must already exist and be in the same location as the + multimodal dataset. If not provided, a generated table id will + be created in the `vertex_datasets` dataset (e.g. + `project.vertex_datasets_us_central1.multimodal_dataset_4cbf7ffd`). + display_name (str): + Optional. The user-defined name of the dataset. The name can be + up to 128 characters long and can consist of any UTF-8 + characters. + project (str): + Optional. Project to create this dataset in. Overrides project + set in aiplatform.init. + location (str): + Optional. Location to create this dataset in. Overrides location + set in aiplatform.init. + credentials (auth_credentials.Credentials): + Optional. Custom credentials to use to create this dataset. + Overrides credentials set in aiplatform.init. + labels (Dict[str, str]): + Optional. The labels with user-defined metadata to organize your + datasets. Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. + International characters are allowed. See https://goo.gl/xmQnxf + for more information on and examples of labels. No more than 64 + user labels can be associated with one dataset (System labels + are excluded). System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + sync (bool): + Optional. Whether to execute this method synchronously. 
If + False, this method will be executed in concurrent Future and any + downstream object will be immediately returned and synced when + the Future has completed. + create_request_timeout (float): + Optional. The timeout for the dataset creation request. + + Returns: + The created multimodal dataset. + """ + from google.cloud import bigquery # pylint: disable=g-import-not-at-top + + if target_table_id: + target_table_id = _normalize_and_validate_table_id( + table_id=target_table_id, + project=project, + vertex_location=location, + credentials=credentials, + ) + else: + dataset_id = _create_default_bigquery_dataset_if_not_exists( + project=project, location=location, credentials=credentials + ) + target_table_id = _generate_target_table_id(dataset_id) + + if not project: + project = initializer.global_config.project + + temp_table_id = dataframe.to_gbq() + client = bigquery.Client(project=project, credentials=credentials) + copy_job = client.copy_table( + sources=temp_table_id, + destination=target_table_id, + ) + copy_job.result() + + bigquery_uri = f"bq://{target_table_id}" + return cls._create_from_bigquery( + metadata=_get_metadata_for_bq(bq_uri=bigquery_uri), + display_name=display_name, + project=project, + location=location, + credentials=credentials, + labels=labels, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + @classmethod + def from_gemini_request_jsonl( + cls, + *, + gcs_uri: str, + target_table_id: Optional[str] = None, + display_name: Optional[str] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + labels: Optional[Dict[str, str]] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> "MultimodalDataset": + """Creates a multimodal dataset from a JSONL file stored on GCS. + + The JSONL file should contain a instances of Gemini + `GenerateContentRequest` on each line. 
The data will be stored in a + BigQuery table with a single column called "requests". The + request_column_name in the dataset metadata will be set to "requests". + + Args: + gcs_uri (str): + The Google Cloud Storage URI of the JSONL file to import. + For example, 'gs://my-bucket/path/to/data.jsonl' + target_table_id (str): + Optional. The BigQuery table id where the dataframe will be + uploaded. The table id can be in the format of "dataset.table" + or "project.dataset.table". If a table already exists with the + given table id, it will be overwritten. Note that the BigQuery + dataset must already exist and be in the same location as the + multimodal dataset. If not provided, a generated table id will + be created in the `vertex_datasets` dataset (e.g. + `project.vertex_datasets_us_central1.multimodal_dataset_4cbf7ffd`). + display_name (str): + Optional. The user-defined name of the dataset. The name can be + up to 128 characters long and can consist of any UTF-8 + characters. + project (str): + Optional. Project to create this dataset in. Overrides project + set in aiplatform.init. + location (str): + Optional. Location to create this dataset in. Overrides location + set in aiplatform.init. + credentials (auth_credentials.Credentials): + Optional. Custom credentials to use to create this dataset. + Overrides credentials set in aiplatform.init. + labels (Dict[str, str]): + Optional. The labels with user-defined metadata to organize your + datasets. Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. + International characters are allowed. See https://goo.gl/xmQnxf + for more information on and examples of labels. No more than 64 + user labels can be associated with one dataset (System labels + are excluded). System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + sync (bool): + Optional. 
Whether to execute this method synchronously. If + False, this method will be executed in concurrent Future and any + downstream object will be immediately returned and synced when + the Future has completed. + create_request_timeout (float): + Optional. The timeout for the dataset creation request. + + Returns: + The created multimodal dataset. + """ + bigframes = _try_import_bigframes() + from google.cloud import bigquery # pylint: disable=g-import-not-at-top + + if not project: + project = initializer.global_config.project + if not location: + location = initializer.global_config.location + if not credentials: + credentials = initializer.global_config.credentials + + if target_table_id: + target_table_id = _normalize_and_validate_table_id( + table_id=target_table_id, + project=project, + vertex_location=location, + credentials=credentials, + ) + else: + dataset_id = _create_default_bigquery_dataset_if_not_exists( + project=project, location=location, credentials=credentials + ) + target_table_id = _generate_target_table_id(dataset_id) + + gcs_uri_prefix = "gs://" + if gcs_uri.startswith(gcs_uri_prefix): + gcs_uri = gcs_uri[len(gcs_uri_prefix) :] + parts = gcs_uri.split("/", 1) + if len(parts) != 2: + raise ValueError( + "Invalid GCS URI format. 
Expected: gs://bucket-name/object-path" + ) + bucket_name = parts[0] + blob_name = parts[1] + + storage_client = storage.Client(project=project) + bucket = storage_client.bucket(bucket_name) + blob = bucket.blob(blob_name) + request_column_name = cls._DEFAULT_REQUEST_COLUMN_NAME + + jsonl_string = blob.download_as_text() + lines = [line.strip() for line in jsonl_string.splitlines() if line.strip()] + json_string = json.dumps({request_column_name: lines}) + + session_options = bigframes.BigQueryOptions( + credentials=credentials, + project=project, + location=location, + ) + with bigframes.connect(session_options) as session: + temp_bigframes_df = session.read_json(io.StringIO(json_string)) + temp_bigframes_df[request_column_name] = bigframes.bigquery.parse_json( + temp_bigframes_df[request_column_name] + ) + temp_table_id = temp_bigframes_df.to_gbq() + client = bigquery.Client(project=project, credentials=credentials) + copy_job = client.copy_table( + sources=temp_table_id, + destination=target_table_id, + ) + copy_job.result() + + bigquery_uri = f"bq://{target_table_id}" + return cls._create_from_bigquery( + metadata=_get_metadata_for_bq( + bq_uri=bigquery_uri, request_column_name=request_column_name + ), + display_name=display_name, + project=project, + location=location, + credentials=credentials, + labels=labels, + sync=sync, + create_request_timeout=create_request_timeout, + ) + + def to_bigframes(self) -> "bigframes.pandas.DataFrame": # type: ignore # noqa: F821 + """Converts a multimodal dataset to a BigFrames dataframe. + + This is the preferred method to inspect the multimodal dataset in a + notebook. + + Returns: + A BigFrames dataframe. 
+ """ + bigframes = _try_import_bigframes() + return bigframes.pandas.read_gbq_table(self.bigquery_table.lstrip("bq://")) + + @classmethod + @base.optional_sync() + def _create_from_bigquery( + cls, + *, + metadata: struct_pb2.Value, + display_name: Optional[str] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + labels: Optional[Dict[str, str]] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + ) -> "MultimodalDataset": + if not display_name: + display_name = cls._generate_display_name() + utils.validate_display_name(display_name) + if labels: + utils.validate_labels(labels) + if not project: + project = initializer.global_config.project + if not location: + location = initializer.global_config.location + if not credentials: + credentials = initializer.global_config.credentials + + dataset = gca_dataset.Dataset( + display_name=display_name, + metadata_schema_uri=_MULTIMODAL_METADATA_SCHEMA_URI, + metadata=metadata, + labels=labels, + ) + parent = initializer.global_config.common_location_path( + project=project, location=location + ) + api_client = cls._instantiate_client( + location=location, credentials=credentials + ).select_version(compat.V1BETA1) + create_lro = api_client.create_dataset( + dataset=dataset, parent=parent, timeout=create_request_timeout + ) + _LOGGER.log_create_with_lro(cls, create_lro) + created_dataset = create_lro.result(timeout=None) + _LOGGER.log_create_complete(cls, created_dataset, "ds") + return cls(dataset_name=created_dataset.name) + + def update( + self, + *, + display_name: Optional[str] = None, + labels: Optional[Dict[str, str]] = None, + description: Optional[str] = None, + update_request_timeout: Optional[float] = None, + ): + """Update the dataset. + + Updatable fields: + - ``display_name`` + - ``labels`` + - ``description`` + + Args: + display_name (str): + Optional. The user-defined name of the Dataset. 
The name can be + up to 128 characters long and can consist of any UTF-8 + characters. + labels (Dict[str, str]): + Optional. Labels with user-defined metadata to organize your + datasets. Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. + International characters are allowed. No more than 64 user + labels can be associated with one dataset (System labels are + excluded). See https://goo.gl/xmQnxf for more information and + examples of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. + description (str): + Optional. The description of the Dataset. + update_request_timeout (float): + Optional. The timeout for the update request in seconds. + + Returns: + dataset (MultimodalDataset): + Updated dataset. + """ + update_mask = field_mask_pb2.FieldMask() + if display_name: + update_mask.paths.append("display_name") + + if labels: + update_mask.paths.append("labels") + + if description: + update_mask.paths.append("description") + + update_dataset = gca_dataset.Dataset( + name=self.resource_name, + display_name=display_name, + description=description, + labels=labels, + ) + + self._gca_resource = self.api_client.update_dataset( + dataset=update_dataset, + update_mask=update_mask, + timeout=update_request_timeout, + ) + + return self + + def attach_template_config( + self, + *, + template_config: Optional[GeminiTemplateConfig] = None, + prompt: Optional[prompts.Prompt] = None, + update_request_timeout: Optional[float] = None, + ): + """Attach a template config or prompt to the dataset. + + Args: + template_config (GeminiTemplateConfig): + Optional. The template config to attach to the dataset. + prompt (prompts.Prompt): + Optional. The prompt to attach to the dataset. + update_request_timeout (float): + Optional. The timeout for the update request in seconds. 
+ + Returns: + MultimodalDataset - The updated dataset. + """ + if not (template_config or prompt): + raise ValueError("Either template_config or prompt must be provided.") + if template_config and prompt: + raise ValueError("Only one of template_config or prompt can be provided.") + + raw_template_config = None + if template_config: + raw_template_config = template_config._raw_gemini_template_config + prompt_uri = None + if prompt: + if prompt.prompt_id: + saved_prompt = prompt + else: + saved_prompt = prompts.create_version(prompt) + location = initializer.global_config.location + project = initializer.global_config.project + # TODO(b/404208669): Support prompt versions. + prompt_uri = f"projects/{project}/locations/{location}/datasets/{saved_prompt.prompt_id}" + + update_mask = field_mask_pb2.FieldMask(paths=["metadata"]) + update_dataset = gca_dataset.Dataset( + name=self.resource_name, + metadata=_get_metadata_for_bq( + bq_uri=self.bigquery_table, + template_config=raw_template_config, + prompt_uri=prompt_uri, + ), + ) + self._gca_resource = self.api_client.update_dataset( + dataset=update_dataset, + update_mask=update_mask, + timeout=update_request_timeout, + ) + return self + + def has_template_config(self) -> bool: + """Returns true if the dataset has a template config attached.""" + self._assert_gca_resource_is_available() + return _GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD in self._gca_resource.metadata + + @property + def template_config(self) -> Optional[GeminiTemplateConfig]: + """Return a copy of the template config attached to this dataset.""" + self._assert_gca_resource_is_available() + # Dataset has no attached template. + if _GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD not in self._gca_resource.metadata: + return None + # Dataset has a template attached as a GeminiTemplateConfig. 
+ if ( + _GEMINI_TEMPLATE_CONFIG_FIELD + in self._gca_resource.metadata[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] + ): + struct_proto_container = self._gca_resource.metadata[ + _GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD + ].pb + struct_proto = struct_proto_container.get(_GEMINI_TEMPLATE_CONFIG_FIELD) + # Detour via json (instead of dict) to avoid field renaming mismatches. + gapic_template_config = gca_dataset_service.GeminiTemplateConfig.from_json( + json_format.MessageToJson(struct_proto) + ) + return GeminiTemplateConfig._from_gapic(gapic_template_config) + # Dataset has a template attached as a Prompt resource URI. + if ( + _PROMPT_URI_FIELD + in self._gca_resource.metadata[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] + ): + prompt_uri = ( + self._gca_resource.metadata[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] + .pb.get(_PROMPT_URI_FIELD) + .string_value + ) + resource_name_prefix = f"projects/{initializer.global_config.project}/locations/{initializer.global_config.location}/datasets/" + if not prompt_uri.startswith(resource_name_prefix): + prompt_location = prompt_uri.split("/")[3] + prompt_project = prompt_uri.split("/")[1] + raise ValueError( + "Attached prompt is not in the currently configured global " + "project and/or location. (Configured project/location: " + f"{initializer.global_config.location}, " + f"{initializer.global_config.project}; Attached prompt: " + f"{prompt_location}, {prompt_project})" + ) + prompt_id = prompt_uri + if prompt_id.startswith(resource_name_prefix): + prompt_id = prompt_id[len(resource_name_prefix) :] + prompt = prompts.get(prompt_id) + return GeminiTemplateConfig( + gemini_example=GeminiExample.from_prompt(prompt), field_mapping={} + ) + + return None + + @property + def request_column_name(self) -> Optional[str]: + """Return the request column name if it is set in the dataset metadata. + + The request column name specifies a column in the dataset that contains + assembled Gemini `GenerateContentRequest` instances. 
+ """ + + self._assert_gca_resource_is_available() + # Dataset has no attached template. + if _GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD not in self._gca_resource.metadata: + return None + if ( + _REQUEST_COLUMN_NAME_FIELD + not in self._gca_resource.metadata[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD] + ): + return None + return self._gca_resource.metadata[_GEMINI_TEMPLATE_CONFIG_SOURCE_FIELD][ + _REQUEST_COLUMN_NAME_FIELD + ] + + def assemble( + self, + *, + template_config: Optional[GeminiTemplateConfig] = None, + load_dataframe: bool = True, + assemble_request_timeout: Optional[float] = None, + ) -> Tuple[str, "bigframes.pandas.DataFrame"]: # type: ignore # noqa: F821 + """Assemble the dataset into a BigQuery table. + + Args: + template_config (GeminiTemplateConfig): + Optional. The template config to use to assemble the dataset. + If not provided, the template config attached to the dataset + will be used. + load_dataframe (bool): + Optional. Whether to load the assembled dataset into a BigFrames + DataFrame. + assemble_request_timeout (float): + Optional. The timeout for the assemble request in seconds. + + Returns: + Tuple[str, "bigframes.pandas.DataFrame"]: + A tuple containing the ID of the assembled dataset in BigQuery + and the assembled dataset as a BigFrames DataFrame if + load_dataframe is True, otherwise None. 
+ """ + bigframes = _try_import_bigframes() + request = gca_dataset_service.AssembleDataRequest( + name=self.resource_name, + gemini_request_read_config=self._build_gemini_request_read_config( + template_config + ), + ) + + assemble_lro = self.api_client.assemble_data( + request=request, timeout=assemble_request_timeout + ) + _LOGGER.log_action_started_against_resource_with_lro( + "Assemble", "data", self.__class__, assemble_lro + ) + result = assemble_lro.result(timeout=None) + _LOGGER.log_action_completed_against_resource("data", "assembled", self) + table_id = result.bigquery_destination.lstrip("bq://") + if load_dataframe: + session_options = bigframes.BigQueryOptions( + credentials=initializer.global_config.credentials, + project=initializer.global_config.project, + location=initializer.global_config.location, + ) + with bigframes.connect(session_options) as session: + df = session.read_gbq(table_id) + else: + df = None + + return (table_id, df) + + def assess_tuning_resources( + self, + *, + model_name: str, + template_config: Optional[GeminiTemplateConfig] = None, + assess_request_timeout: Optional[float] = None, + ) -> TuningResourceUsageAssessmentResult: + """Assess the tuning resources required for a given model. + + Args: + model_name (str): + Required. The name of the model to assess the tuning resources + for. + template_config (GeminiTemplateConfig): + Optional. The template config used to assemble the dataset + before assessing the tuning resources. If not provided, the + template config attached to the dataset will be used. Required + if no template config is attached to the dataset. + assess_request_timeout (float): + Optional. The timeout for the assess tuning resources request. + Returns: + A dict containing the tuning resource usage assessment result. The + dict contains the following keys: + - token_count: The number of tokens in the dataset. + - billable_character_count: The number of billable characters in the + dataset. 
+ + """ + request = self._build_assess_data_request(template_config) + request.tuning_resource_usage_assessment_config = ( + gca_dataset_service.AssessDataRequest.TuningResourceUsageAssessmentConfig( + model_name=model_name + ) + ) + + assessment_result = ( + self.api_client.assess_data(request=request, timeout=assess_request_timeout) + .result(timeout=None) + .tuning_resource_usage_assessment_result + ) + return TuningResourceUsageAssessmentResult( + token_count=assessment_result.token_count, + billable_character_count=assessment_result.billable_character_count, + ) + + def assess_tuning_validity( + self, + *, + model_name: str, + dataset_usage: str, + template_config: Optional[GeminiTemplateConfig] = None, + assess_request_timeout: Optional[float] = None, + ) -> TuningValidationAssessmentResult: + """Assess if the assembled dataset is valid in terms of tuning a given + model. + + Args: + model_name (str): + Required. The name of the model to assess the tuning validity + for. + dataset_usage (str): + Required. The dataset usage to assess the tuning validity for. + Must be one of the following: SFT_TRAINING, SFT_VALIDATION. + template_config (GeminiTemplateConfig): + Optional. The template config used to assemble the dataset + before assessing the tuning validity. If not provided, the + template config attached to the dataset will be used. Required + if no template config is attached to the dataset. + assess_request_timeout (float): + Optional. The timeout for the assess tuning validity request. + Returns: + A dict containing the tuning validity assessment result. The dict + contains the following keys: + - errors: A list of errors that occurred during the tuning validity + assessment. 
+ """ + DatasetUsage = ( + gca_dataset_service.AssessDataRequest.TuningValidationAssessmentConfig.DatasetUsage + ) + try: + dataset_usage_enum = DatasetUsage[dataset_usage] + except KeyError as e: + valid_dataset_usage_names = [ + e.name for e in DatasetUsage if e.name != "DATASET_USAGE_UNSPECIFIED" + ] + raise ValueError( + f"Argument 'dataset_usage' must be one of the following: " + f"{', '.join(valid_dataset_usage_names)}." + ) from e + if dataset_usage_enum == DatasetUsage.DATASET_USAGE_UNSPECIFIED: + raise ValueError("Dataset usage must be specified.") + + request = self._build_assess_data_request(template_config) + request.tuning_validation_assessment_config = ( + gca_dataset_service.AssessDataRequest.TuningValidationAssessmentConfig( + model_name=model_name, + dataset_usage=dataset_usage_enum, + ) + ) + assess_lro = self.api_client.assess_data( + request=request, timeout=assess_request_timeout + ) + assessment_result = assess_lro.result(timeout=None) + return TuningValidationAssessmentResult( + errors=assessment_result.tuning_validation_assessment_result.errors + ) + + def assess_batch_prediction_resources( + self, + *, + model_name: str, + template_config: Optional[GeminiTemplateConfig] = None, + assess_request_timeout: Optional[float] = None, + ) -> BatchPredictionResourceUsageAssessmentResult: + """Assess the batch prediction resources required for a given model. + + Args: + model_name (str): + Required. The name of the model to assess the batch prediction resources + for. + template_config (GeminiTemplateConfig): + Optional. The template config used to assemble the dataset + before assessing the batch prediction resources. If not provided, the + template config attached to the dataset will be used. Required + if no template config is attached to the dataset. + assess_request_timeout (float): + Optional. The timeout for the assess batch prediction resources request. + Returns: + A dict containing the batch prediction resource usage assessment result. 
The + dict contains the following keys: + - token_count: The number of tokens in the dataset. + - audio_token_count: The number of audio tokens in the dataset. + + """ + request = self._build_assess_data_request(template_config) + request.batch_prediction_resource_usage_assessment_config = gca_dataset_service.AssessDataRequest.BatchPredictionResourceUsageAssessmentConfig( + model_name=model_name + ) + + assessment_result = ( + self.api_client.assess_data(request=request, timeout=assess_request_timeout) + .result(timeout=None) + .batch_prediction_resource_usage_assessment_result + ) + return BatchPredictionResourceUsageAssessmentResult( + token_count=assessment_result.token_count, + audio_token_count=assessment_result.audio_token_count, + ) + + def assess_batch_prediction_validity( + self, + *, + model_name: str, + template_config: Optional[GeminiTemplateConfig] = None, + assess_request_timeout: Optional[float] = None, + ) -> None: + """Assess if the assembled dataset is valid in terms of batch prediction + for a given model. Raises an error if the dataset is invalid, otherwise + returns None. + + Args: + model_name (str): + Required. The name of the model to assess the batch prediction + validity for. + dataset_usage (str): + Required. The dataset usage to assess the batch prediction + validity for. + Must be one of the following: SFT_TRAINING, SFT_VALIDATION. + template_config (GeminiTemplateConfig): + Optional. The template config used to assemble the dataset + before assessing the batch prediction validity. If not provided, the + template config attached to the dataset will be used. Required + if no template config is attached to the dataset. + assess_request_timeout (float): + Optional. The timeout for the assess batch prediction validity request. 
+ """ + request = self._build_assess_data_request(template_config) + request.batch_prediction_validation_assessment_config = gca_dataset_service.AssessDataRequest.BatchPredictionValidationAssessmentConfig( + model_name=model_name, + ) + assess_lro = self.api_client.assess_data( + request=request, timeout=assess_request_timeout + ) + assess_lro.result(timeout=None) + + def _build_assess_data_request( + self, + template_config: Optional[GeminiTemplateConfig] = None, + ): + return gca_dataset_service.AssessDataRequest( + name=self.resource_name, + gemini_request_read_config=self._build_gemini_request_read_config( + template_config + ), + ) + + def _build_gemini_request_read_config( + self, provided_template_config: Optional[GeminiTemplateConfig] = None + ) -> gca_dataset_service.GeminiRequestReadConfig: + """Returns the provided template config wrapped in a read config if it + is not None, otherwise returns the read config attached to the + dataset.""" + if provided_template_config is not None: + return gca_dataset_service.GeminiRequestReadConfig( + template_config=provided_template_config._raw_gemini_template_config + ) + elif self.template_config is not None: + return gca_dataset_service.GeminiRequestReadConfig( + template_config=self.template_config._raw_gemini_template_config + ) + elif self.request_column_name is not None: + return gca_dataset_service.GeminiRequestReadConfig( + assembled_request_column_name=self.request_column_name + ) + else: + raise ValueError( + "No template config was provided and no template config is attached to the dataset." + ) diff --git a/google/cloud/aiplatform/preview/jobs.py b/google/cloud/aiplatform/preview/jobs.py index b8ed5519e7..645800e408 100644 --- a/google/cloud/aiplatform/preview/jobs.py +++ b/google/cloud/aiplatform/preview/jobs.py @@ -15,9 +15,8 @@ # limitations under the License. 
# -from typing import Dict, List, Optional, Union - import copy +from typing import Dict, List, Optional, Sequence, Union import uuid from google.api_core import retry @@ -68,6 +67,12 @@ gca_job_state_v1beta1.JobState.JOB_STATE_CANCELLED, ) +# _block_until_complete wait times +_JOB_WAIT_TIME = 5 # start at five seconds +_LOG_WAIT_TIME = 5 +_MAX_WAIT_TIME = 60 * 5 # 5 minute wait +_WAIT_TIME_MULTIPLIER = 2 # scale wait by 2 every iteration + class CustomJob(jobs.CustomJob): """Deprecated. Vertex AI Custom Job (preview).""" @@ -867,3 +872,549 @@ def _run( ) self._block_until_complete() + + +class BatchPredictionJob(jobs.BatchPredictionJob): + """Vertex AI Batch Prediction Job.""" + + @classmethod + def create( + cls, + # TODO(b/223262536): Make the job_display_name parameter optional in the next major release + job_display_name: str, + model_name: Union[str, "aiplatform.Model"], + instances_format: str = "jsonl", + predictions_format: str = "jsonl", + gcs_source: Optional[Union[str, Sequence[str]]] = None, + bigquery_source: Optional[str] = None, + gcs_destination_prefix: Optional[str] = None, + bigquery_destination_prefix: Optional[str] = None, + model_parameters: Optional[Dict] = None, + machine_type: Optional[str] = None, + accelerator_type: Optional[str] = None, + accelerator_count: Optional[int] = None, + starting_replica_count: Optional[int] = None, + max_replica_count: Optional[int] = None, + generate_explanation: Optional[bool] = False, + explanation_metadata: Optional["aiplatform.explain.ExplanationMetadata"] = None, + explanation_parameters: Optional[ + "aiplatform.explain.ExplanationParameters" + ] = None, + labels: Optional[Dict[str, str]] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + encryption_spec_key_name: Optional[str] = None, + sync: bool = True, + create_request_timeout: Optional[float] = None, + batch_size: Optional[int] = None, + 
model_monitoring_objective_config: Optional[ + "aiplatform.model_monitoring.ObjectiveConfig" + ] = None, + model_monitoring_alert_config: Optional[ + "aiplatform.model_monitoring.AlertConfig" + ] = None, + analysis_instance_schema_uri: Optional[str] = None, + service_account: Optional[str] = None, + reservation_affinity_type: Optional[str] = None, + reservation_affinity_key: Optional[str] = None, + reservation_affinity_values: Optional[List[str]] = None, + ) -> "BatchPredictionJob": + """Create a batch prediction job. + + Args: + job_display_name (str): + Required. The user-defined name of the BatchPredictionJob. + The name can be up to 128 characters long and can consist + of any UTF-8 characters. + model_name (Union[str, aiplatform.Model]): + Required. A fully-qualified model resource name or model ID. + Example: "projects/123/locations/us-central1/models/456" or + "456" when project and location are initialized or passed. + May optionally contain a version ID or alias in + {model_name}@{version} form. + + Or an instance of aiplatform.Model. + instances_format (str): + Required. The format in which instances are provided. Must be one + of the formats listed in `Model.supported_input_storage_formats`. + Default is "jsonl" when using `gcs_source`. If a `bigquery_source` + is provided, this is overridden to "bigquery". + predictions_format (str): + Required. The format in which Vertex AI outputs the + predictions, must be one of the formats specified in + `Model.supported_output_storage_formats`. + Default is "jsonl" when using `gcs_destination_prefix`. If a + `bigquery_destination_prefix` is provided, this is overridden to + "bigquery". + gcs_source (Optional[Sequence[str]]): + Google Cloud Storage URI(-s) to your instances to run + batch prediction on. They must match `instances_format`. + + bigquery_source (Optional[str]): + BigQuery URI to a table, up to 2000 characters long. 
For example: + `bq://projectId.bqDatasetId.bqTableId` + gcs_destination_prefix (Optional[str]): + The Google Cloud Storage location of the directory where the + output is to be written to. In the given directory a new + directory is created. Its name is + ``prediction-<model-display-name>-<job-create-time>``, where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. + Inside of it files ``predictions_0001.<extension>``, + ``predictions_0002.<extension>``, ..., + ``predictions_N.<extension>`` are created where + ``<extension>`` depends on chosen ``predictions_format``, + and N may equal 0001 and depends on the total number of + successfully predicted instances. If the Model has both + ``instance`` and ``prediction`` schemata defined then each such + file contains predictions as per the ``predictions_format``. + If prediction for any instance failed (partially or + completely), then an additional ``errors_0001.<extension>``, + ``errors_0002.<extension>``,..., ``errors_N.<extension>`` + files are created (N depends on total number of failed + predictions). These files contain the failed instances, as + per their schema, followed by an additional ``error`` field + which as value has ```google.rpc.Status`` `__ + containing only ``code`` and ``message`` fields. + bigquery_destination_prefix (Optional[str]): + The BigQuery project or dataset location where the output is + to be written to. If project is provided, a new dataset is + created with name + ``prediction_<model-display-name>_<job-create-time>`` where + <model-display-name> is made BigQuery-dataset-name compatible (for example, most + special characters become underscores), and timestamp is in + YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and + ``errors``. 
If the Model has both + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] + schemata defined then the tables have columns as follows: + The ``predictions`` table contains instances for which the + prediction succeeded, it has columns as per a concatenation + of the Model's instance and prediction schemata. The + ``errors`` table contains rows for which the prediction has + failed, it has instance columns, as per the instance schema, + followed by a single "errors" column, which as values has + [google.rpc.Status][google.rpc.Status] represented as a + STRUCT, and containing only ``code`` and ``message``. + model_parameters (Optional[Dict]): + The parameters that govern the predictions. The schema of + the parameters may be specified via the Model's `parameters_schema_uri`. + machine_type (Optional[str]): + The type of machine for running batch prediction on + dedicated resources. Not specifying machine type will result in + batch prediction job being run with automatic resources. + accelerator_type (Optional[str]): + The type of accelerator(s) that may be attached + to the machine as per `accelerator_count`. Only used if + `machine_type` is set. + accelerator_count (Optional[int]): + The number of accelerators to attach to the + `machine_type`. Only used if `machine_type` is set. + starting_replica_count (Optional[int]): + The number of machine replicas used at the start of the batch + operation. If not set, Vertex AI decides starting number, not + greater than `max_replica_count`. Only used if `machine_type` is + set. + max_replica_count (Optional[int]): + The maximum number of machine replicas the batch operation may + be scaled to. Only used if `machine_type` is set. + Default is 10. + generate_explanation (bool): + Optional. Generate explanation along with the batch prediction + results. 
This will cause the batch prediction output to include + explanations based on the `prediction_format`: + - `bigquery`: output includes a column named `explanation`. The value + is a struct that conforms to the [aiplatform.gapic.Explanation] object. + - `jsonl`: The JSON objects on each line include an additional entry + keyed `explanation`. The value of the entry is a JSON object that + conforms to the [aiplatform.gapic.Explanation] object. + - `csv`: Generating explanations for CSV format is not supported. + explanation_metadata (aiplatform.explain.ExplanationMetadata): + Optional. Explanation metadata configuration for this BatchPredictionJob. + Can be specified only if `generate_explanation` is set to `True`. + + This value overrides the value of `Model.explanation_metadata`. + All fields of `explanation_metadata` are optional in the request. If + a field of the `explanation_metadata` object is not populated, the + corresponding field of the `Model.explanation_metadata` object is inherited. + For more details, see `Ref docs ` + explanation_parameters (aiplatform.explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's predictions. + Can be specified only if `generate_explanation` is set to `True`. + + This value overrides the value of `Model.explanation_parameters`. + All fields of `explanation_parameters` are optional in the request. If + a field of the `explanation_parameters` object is not populated, the + corresponding field of the `Model.explanation_parameters` object is inherited. + For more details, see `Ref docs ` + labels (Dict[str, str]): + Optional. The labels with user-defined metadata to organize your + BatchPredictionJobs. Label keys and values can be no longer than + 64 characters (Unicode codepoints), can only contain lowercase + letters, numeric characters, underscores and dashes. + International characters are allowed. See https://goo.gl/xmQnxf + for more information and examples of labels. 
+ credentials (Optional[auth_credentials.Credentials]): + Custom credentials to use to create this batch prediction + job. Overrides credentials set in aiplatform.init. + encryption_spec_key_name (Optional[str]): + Optional. The Cloud KMS resource identifier of the customer + managed encryption key used to protect the job. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. + + If this is set, then all + resources created by the BatchPredictionJob will + be encrypted with the provided encryption key. + + Overrides encryption_spec_key_name set in aiplatform.init. + sync (bool): + Whether to execute this method synchronously. If False, this method + will be executed in concurrent Future and any downstream object will + be immediately returned and synced when the Future has completed. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. + batch_size (int): + Optional. The number of the records (e.g. instances) of the operation given in each batch + to a machine replica. Machine type, and size of a single record should be considered + when setting this parameter, higher value speeds up the batch operation's execution, + but too high value will result in a whole batch not fitting in a machine's memory, + and the whole operation will fail. + The default value is 64. + model_monitoring_objective_config (aiplatform.model_monitoring.ObjectiveConfig): + Optional. The objective config for model monitoring. Passing this parameter enables + monitoring on the model associated with this batch prediction job. + model_monitoring_alert_config (aiplatform.model_monitoring.EmailAlertConfig): + Optional. Configures how model monitoring alerts are sent to the user. Right now + only email alert is supported. + analysis_instance_schema_uri (str): + Optional. 
Only applicable if model_monitoring_objective_config is also passed. + This parameter specifies the YAML schema file uri describing the format of a single + instance that you want Tensorflow Data Validation (TFDV) to + analyze. If this field is empty, all the feature data types are + inferred from predict_instance_schema_uri, meaning that TFDV + will use the data in the exact format as prediction request/response. + If there are any data type differences between predict instance + and TFDV instance, this field can be used to override the schema. + For models trained with Vertex AI, this field must be set as all the + fields in predict instance formatted as string. + service_account (str): + Optional. Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this run-as account. + reservation_affinity_type (str): + Optional. The type of reservation affinity. + One of NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION, + SPECIFIC_THEN_ANY_RESERVATION, SPECIFIC_THEN_NO_RESERVATION + reservation_affinity_key (str): + Optional. Corresponds to the label key of a reservation resource. + To target a SPECIFIC_RESERVATION by name, use `compute.googleapis.com/reservation-name` as the key + and specify the name of your reservation as its value. + reservation_affinity_values (List[str]): + Optional. Corresponds to the label values of a reservation resource. + This must be the full resource name of the reservation. + Format: 'projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}' + Returns: + (jobs.BatchPredictionJob): + Instantiated representation of the created batch prediction job. 
+ """ + return cls._submit_impl( + job_display_name=job_display_name, + model_name=model_name, + instances_format=instances_format, + predictions_format=predictions_format, + gcs_source=gcs_source, + bigquery_source=bigquery_source, + gcs_destination_prefix=gcs_destination_prefix, + bigquery_destination_prefix=bigquery_destination_prefix, + model_parameters=model_parameters, + machine_type=machine_type, + accelerator_type=accelerator_type, + accelerator_count=accelerator_count, + starting_replica_count=starting_replica_count, + max_replica_count=max_replica_count, + generate_explanation=generate_explanation, + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, + labels=labels, + project=project, + location=location, + credentials=credentials, + encryption_spec_key_name=encryption_spec_key_name, + sync=sync, + create_request_timeout=create_request_timeout, + batch_size=batch_size, + model_monitoring_objective_config=model_monitoring_objective_config, + model_monitoring_alert_config=model_monitoring_alert_config, + analysis_instance_schema_uri=analysis_instance_schema_uri, + service_account=service_account, + reservation_affinity_type=reservation_affinity_type, + reservation_affinity_key=reservation_affinity_key, + reservation_affinity_values=reservation_affinity_values, + # Main distinction of `create` vs `submit`: + wait_for_completion=True, + ) + + @classmethod + def submit( + cls, + *, + job_display_name: Optional[str] = None, + model_name: Union[str, "aiplatform.Model"], + instances_format: str = "jsonl", + predictions_format: str = "jsonl", + gcs_source: Optional[Union[str, Sequence[str]]] = None, + bigquery_source: Optional[str] = None, + gcs_destination_prefix: Optional[str] = None, + bigquery_destination_prefix: Optional[str] = None, + model_parameters: Optional[Dict] = None, + machine_type: Optional[str] = None, + accelerator_type: Optional[str] = None, + accelerator_count: Optional[int] = None, + 
starting_replica_count: Optional[int] = None, + max_replica_count: Optional[int] = None, + generate_explanation: Optional[bool] = False, + explanation_metadata: Optional["aiplatform.explain.ExplanationMetadata"] = None, + explanation_parameters: Optional[ + "aiplatform.explain.ExplanationParameters" + ] = None, + labels: Optional[Dict[str, str]] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + encryption_spec_key_name: Optional[str] = None, + create_request_timeout: Optional[float] = None, + batch_size: Optional[int] = None, + model_monitoring_objective_config: Optional[ + "aiplatform.model_monitoring.ObjectiveConfig" + ] = None, + model_monitoring_alert_config: Optional[ + "aiplatform.model_monitoring.AlertConfig" + ] = None, + analysis_instance_schema_uri: Optional[str] = None, + service_account: Optional[str] = None, + reservation_affinity_type: Optional[str] = None, + reservation_affinity_key: Optional[str] = None, + reservation_affinity_values: Optional[List[str]] = None, + ) -> "BatchPredictionJob": + """Submit a batch prediction job (not waiting for completion). + + Args: + job_display_name (str): Required. The user-defined name of the + BatchPredictionJob. The name can be up to 128 characters long and + can consist of any UTF-8 characters. + model_name (Union[str, aiplatform.Model]): Required. A fully-qualified + model resource name or model ID. + Example: "projects/123/locations/us-central1/models/456" or "456" + when project and location are initialized or passed. May + optionally contain a version ID or alias in + {model_name}@{version} form. Or an instance of + aiplatform.Model. + instances_format (str): Required. The format in which instances are + provided. Must be one of the formats listed in + `Model.supported_input_storage_formats`. Default is "jsonl" when + using `gcs_source`. If a `bigquery_source` is provided, this is + overridden to "bigquery". 
+ predictions_format (str): Required. The format in which Vertex AI + outputs the predictions, must be one of the formats specified in + `Model.supported_output_storage_formats`. Default is "jsonl" when + using `gcs_destination_prefix`. If a `bigquery_destination_prefix` + is provided, this is overridden to "bigquery". + gcs_source (Optional[Sequence[str]]): Google Cloud Storage URI(-s) to + your instances to run batch prediction on. They must match + `instances_format`. + bigquery_source (Optional[str]): BigQuery URI to a table, up to 2000 + characters long. For example: `bq://projectId.bqDatasetId.bqTableId` + gcs_destination_prefix (Optional[str]): The Google Cloud Storage + location of the directory where the output is to be written to. In + the given directory a new directory is created. Its name is + ``prediction--``, where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. Inside of + it files ``predictions_0001.``, + ``predictions_0002.``, ..., ``predictions_N.`` + are created where ```` depends on chosen + ``predictions_format``, and N may equal 0001 and depends on the + total number of successfully predicted instances. If the Model has + both ``instance`` and ``prediction`` schemata defined then each such + file contains predictions as per the ``predictions_format``. If + prediction for any instance failed (partially or completely), then + an additional ``errors_0001.``, + ``errors_0002.``,..., ``errors_N.`` files are + created (N depends on total number of failed predictions). These + files contain the failed instances, as per their schema, followed by + an additional ``error`` field which as value has + ```google.rpc.Status`` `__ containing only ``code`` and + ``message`` fields. + bigquery_destination_prefix (Optional[str]): The BigQuery project or + dataset location where the output is to be written to. 
If project is + provided, a new dataset is created with name + ``prediction__`` where is made + BigQuery-dataset-name compatible (for example, most special + characters become underscores), and timestamp is in + YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset + two tables will be created, ``predictions``, and ``errors``. If the + Model has both + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] + schemata defined then the tables have columns as follows: The + ``predictions`` table contains instances for which the prediction + succeeded, it has columns as per a concatenation of the Model's + instance and prediction schemata. The ``errors`` table contains rows + for which the prediction has failed, it has instance columns, as per + the instance schema, followed by a single "errors" column, which as + values has [google.rpc.Status][google.rpc.Status] represented as a + STRUCT, and containing only ``code`` and ``message``. + model_parameters (Optional[Dict]): The parameters that govern the + predictions. The schema of the parameters may be specified via the + Model's `parameters_schema_uri`. + machine_type (Optional[str]): The type of machine for running batch + prediction on dedicated resources. Not specifying machine type will + result in batch prediction job being run with automatic resources. + accelerator_type (Optional[str]): The type of accelerator(s) that may + be attached to the machine as per `accelerator_count`. Only used if + `machine_type` is set. + accelerator_count (Optional[int]): The number of accelerators to + attach to the `machine_type`. Only used if `machine_type` is set. + starting_replica_count (Optional[int]): The number of machine replicas + used at the start of the batch operation. If not set, Vertex AI + decides starting number, not greater than `max_replica_count`. Only + used if `machine_type` is set. 
+ max_replica_count (Optional[int]): The maximum number of machine + replicas the batch operation may be scaled to. Only used if + `machine_type` is set. Default is 10. + generate_explanation (bool): Optional. Generate explanation along with + the batch prediction results. This will cause the batch prediction + output to include explanations based on the `prediction_format`: - + `bigquery`: output includes a column named `explanation`. The value + is a struct that conforms to the [aiplatform.gapic.Explanation] + object. - `jsonl`: The JSON objects on each line include an + additional entry keyed `explanation`. The value of the entry is a + JSON object that conforms to the [aiplatform.gapic.Explanation] + object. - `csv`: Generating explanations for CSV format is not + supported. + explanation_metadata (aiplatform.explain.ExplanationMetadata): + Optional. Explanation metadata configuration for this + BatchPredictionJob. Can be specified only if `generate_explanation` + is set to `True`. This value overrides the value of + `Model.explanation_metadata`. All fields of `explanation_metadata` + are optional in the request. If a field of the + `explanation_metadata` object is not populated, the corresponding + field of the `Model.explanation_metadata` object is inherited. For + more details, see `Ref docs ` + explanation_parameters (aiplatform.explain.ExplanationParameters): + Optional. Parameters to configure explaining for Model's + predictions. Can be specified only if `generate_explanation` is set + to `True`. This value overrides the value of + `Model.explanation_parameters`. All fields of + `explanation_parameters` are optional in the request. If a field of + the `explanation_parameters` object is not populated, the + corresponding field of the `Model.explanation_parameters` object is + inherited. For more details, see `Ref docs + ` + labels (Dict[str, str]): Optional. The labels with user-defined + metadata to organize your BatchPredictionJobs. 
Label keys and values + can be no longer than 64 characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, underscores and + dashes. International characters are allowed. See + https://goo.gl/xmQnxf for more information and examples of labels. + credentials (Optional[auth_credentials.Credentials]): Custom + credentials to use to create this batch prediction job. Overrides + credentials set in aiplatform.init. + encryption_spec_key_name (Optional[str]): Optional. The Cloud KMS + resource identifier of the customer managed encryption key used to + protect the job. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. If this is set, then all resources created + by the BatchPredictionJob will be encrypted with the provided + encryption key. Overrides encryption_spec_key_name set in + aiplatform.init. + create_request_timeout (float): Optional. The timeout for the create + request in seconds. + batch_size (int): Optional. The number of the records (e.g. instances) + of the operation given in each batch to a machine replica. Machine + type, and size of a single record should be considered when setting + this parameter, higher value speeds up the batch operation's + execution, but too high value will result in a whole batch not + fitting in a machine's memory, and the whole operation will fail. + The default value is 64. model_monitoring_objective_config + (aiplatform.model_monitoring.ObjectiveConfig): Optional. The + objective config for model monitoring. Passing this parameter + enables monitoring on the model associated with this batch + prediction job. model_monitoring_alert_config + (aiplatform.model_monitoring.EmailAlertConfig): Optional. Configures + how model monitoring alerts are sent to the user. Right now only + email alert is supported. + analysis_instance_schema_uri (str): Optional. 
Only applicable if + model_monitoring_objective_config is also passed. This parameter + specifies the YAML schema file uri describing the format of a single + instance that you want Tensorflow Data Validation (TFDV) to analyze. + If this field is empty, all the feature data types are inferred from + predict_instance_schema_uri, meaning that TFDV will use the data in + the exact format as prediction request/response. If there are any + data type differences between predict instance and TFDV instance, + this field can be used to override the schema. For models trained + with Vertex AI, this field must be set as all the fields in predict + instance formatted as string. + service_account (str): Optional. Specifies the service account for + workload run-as account. Users submitting jobs must have act-as + permission on this run-as account. + reservation_affinity_type (str): Optional. The type of reservation + affinity. One of NO_RESERVATION, ANY_RESERVATION, + SPECIFIC_RESERVATION, SPECIFIC_THEN_ANY_RESERVATION, + SPECIFIC_THEN_NO_RESERVATION + reservation_affinity_key (str): Optional. Corresponds to the label key + of a reservation resource. To target a SPECIFIC_RESERVATION by name, + use `compute.googleapis.com/reservation-name` as the key and specify + the name of your reservation as its value. + reservation_affinity_values (List[str]): Optional. Corresponds to the + label values of a reservation resource. This must be the full + resource name of the reservation. + Format: + 'projects/{project_id_or_number}/zones/{zone}/reservations/{reservation_name}' + + Returns: + (jobs.BatchPredictionJob): + Instantiated representation of the created batch prediction job. 
+ """ + return cls._submit_impl( + job_display_name=job_display_name, + model_name=model_name, + instances_format=instances_format, + predictions_format=predictions_format, + gcs_source=gcs_source, + bigquery_source=bigquery_source, + gcs_destination_prefix=gcs_destination_prefix, + bigquery_destination_prefix=bigquery_destination_prefix, + model_parameters=model_parameters, + machine_type=machine_type, + accelerator_type=accelerator_type, + accelerator_count=accelerator_count, + starting_replica_count=starting_replica_count, + max_replica_count=max_replica_count, + generate_explanation=generate_explanation, + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, + labels=labels, + project=project, + location=location, + credentials=credentials, + encryption_spec_key_name=encryption_spec_key_name, + create_request_timeout=create_request_timeout, + batch_size=batch_size, + model_monitoring_objective_config=model_monitoring_objective_config, + model_monitoring_alert_config=model_monitoring_alert_config, + analysis_instance_schema_uri=analysis_instance_schema_uri, + service_account=service_account, + reservation_affinity_type=reservation_affinity_type, + reservation_affinity_key=reservation_affinity_key, + reservation_affinity_values=reservation_affinity_values, + # Main distinction of `create` vs `submit`: + wait_for_completion=False, + sync=True, + ) diff --git a/google/cloud/aiplatform/preview/models.py b/google/cloud/aiplatform/preview/models.py index 11837cd854..64714f6abe 100644 --- a/google/cloud/aiplatform/preview/models.py +++ b/google/cloud/aiplatform/preview/models.py @@ -28,6 +28,7 @@ from google.cloud.aiplatform import models from google.cloud.aiplatform import utils from google.cloud.aiplatform.utils import _explanation_utils +from google.cloud.aiplatform.utils import prediction_utils from google.cloud.aiplatform.compat.services import ( deployment_resource_pool_service_client_v1beta1, endpoint_service_client, @@ 
-41,6 +42,7 @@ machine_resources_v1beta1 as gca_machine_resources_compat, model_v1 as gca_model_compat, ) +from google.protobuf import duration_pb2 from google.protobuf import json_format _DEFAULT_MACHINE_TYPE = "n1-standard-2" @@ -138,6 +140,9 @@ def create( autoscaling_target_accelerator_duty_cycle: Optional[int] = None, sync=True, create_request_timeout: Optional[float] = None, + required_replica_count: Optional[int] = 0, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, ) -> "DeploymentResourcePool": """Creates a new DeploymentResourcePool. @@ -196,6 +201,24 @@ def create( when the Future has completed. create_request_timeout (float): Optional. The create request timeout in seconds. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + multihost_gpu_node_count (int): + Optional. The number of nodes per replica for multihost GPU + deployments. Required for multihost GPU deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use FlexStart VM powered by + Dynamic Workload Scheduler to schedule the deployment workload. + The FlexStart VM will be available for a maximum of 7 days or up + to the max_runtime_duration specified, whichever is shorter. + After this period, the model will be automatically undeployed. + The value is in seconds. 
Returns: DeploymentResourcePool @@ -222,6 +245,9 @@ def create( autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, sync=sync, create_request_timeout=create_request_timeout, + required_replica_count=required_replica_count, + multihost_gpu_node_count=multihost_gpu_node_count, + max_runtime_duration=max_runtime_duration, ) @classmethod @@ -243,6 +269,9 @@ def _create( autoscaling_target_accelerator_duty_cycle: Optional[int] = None, sync=True, create_request_timeout: Optional[float] = None, + required_replica_count: Optional[int] = 0, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, ) -> "DeploymentResourcePool": """Creates a new DeploymentResourcePool. @@ -304,7 +333,24 @@ def _create( when the Future has completed. create_request_timeout (float): Optional. The create request timeout in seconds. - + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + multihost_gpu_node_count (int): + Optional. The number of nodes per replica for multihost GPU + deployments. Required for multihost GPU deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use FlexStart VM powered by Dynamic + Workload Scheduler to schedule the deployment workload. The DWS + resource will be available for a maximum of 7 days or up to the + max_runtime_duration specified, whichever is shorter. After this + period, the model will be automatically undeployed. The value is + in seconds. 
Returns: DeploymentResourcePool """ @@ -316,10 +362,16 @@ def _create( dedicated_resources = gca_machine_resources_compat.DedicatedResources( min_replica_count=min_replica_count, max_replica_count=max_replica_count, + required_replica_count=required_replica_count, + ) + + prediction_utils.add_flex_start_to_dedicated_resources( + dedicated_resources, max_runtime_duration ) machine_spec = gca_machine_resources_compat.MachineSpec( - machine_type=machine_type + machine_type=machine_type, + multihost_gpu_node_count=multihost_gpu_node_count, ) if autoscaling_target_cpu_utilization: @@ -349,6 +401,9 @@ def _create( [autoscaling_metric_spec] ) + if multihost_gpu_node_count: + machine_spec.multihost_gpu_node_count = multihost_gpu_node_count + dedicated_resources.machine_spec = machine_spec gapic_drp = gca_deployment_resource_pool_compat.DeploymentResourcePool( @@ -451,6 +506,80 @@ def list( ) +class RolloutOptions(object): + """RolloutOptions contains configurations for rolling deployments. + + Attributes: + previous_deployed_model: + The ID of the previous deployed model. + max_surge_percentage: + Maximum additional replicas to create during the deployment, + specified as a percentage of the current replica count. + max_surge_replicas: + Maximum number of additional replicas to create during the + deployment. + max_unavailable_percentage: + Maximum amount of replicas that can be unavailable during the + deployment, specified as a percentage of the current replica count. + max_unavailable_replicas: + Maximum number of replicas that can be unavailable during the + deployment. 
+ """ + + def __init__( + self, + previous_deployed_model: int, + max_surge_percentage: Optional[int] = None, + max_surge_replicas: Optional[int] = None, + max_unavailable_percentage: Optional[int] = None, + max_unavailable_replicas: Optional[int] = None, + ): + self.previous_deployed_model = previous_deployed_model + self.max_surge_percentage = max_surge_percentage + self.max_surge_replicas = max_surge_replicas + self.max_unavailable_percentage = max_unavailable_percentage + self.max_unavailable_replicas = max_unavailable_replicas + + @classmethod + def from_gapic(cls, opts: gca_endpoint_compat.RolloutOptions) -> "RolloutOptions": + return cls( + previous_deployed_model=int(opts.previous_deployed_model), + max_surge_percentage=opts.max_surge_percentage, + max_surge_replicas=opts.max_surge_replicas, + max_unavailable_percentage=opts.max_unavailable_percentage, + max_unavailable_replicas=opts.max_unavailable_replicas, + ) + + def to_gapic(self) -> gca_endpoint_compat.RolloutOptions: + """Converts RolloutOptions class to gapic RolloutOptions proto.""" + result = gca_endpoint_compat.RolloutOptions( + previous_deployed_model=str(self.previous_deployed_model), + ) + if self.max_surge_percentage: + if self.max_surge_replicas: + raise ValueError( + "max_surge_percentage and max_surge_replicas cannot both be" " set." + ) + result.max_surge_percentage = self.max_surge_percentage + elif self.max_surge_replicas: + result.max_surge_replicas = self.max_surge_replicas + else: + result.max_surge_replicas = 0 + if self.max_unavailable_percentage: + if self.max_unavailable_replicas: + raise ValueError( + "max_unavailable_percentage and max_unavailable_replicas" + " cannot both be set." 
+ ) + result.max_unavailable_percentage = self.max_unavailable_percentage + elif self.max_unavailable_replicas: + result.max_unavailable_replicas = self.max_unavailable_replicas + else: + result.max_unavailable_replicas = 0 + + return result + + class Endpoint(aiplatform.Endpoint): @staticmethod def _validate_deploy_args( @@ -461,6 +590,10 @@ def _validate_deploy_args( traffic_split: Optional[Dict[str, int]], traffic_percentage: Optional[int], deployment_resource_pool: Optional[DeploymentResourcePool], + required_replica_count: Optional[int], + initial_replica_count: Optional[int], + min_scaleup_period: Optional[int], + idle_scaledown_period: Optional[int], ): """Helper method to validate deploy arguments. @@ -504,6 +637,25 @@ def _validate_deploy_args( are deployed to the same DeploymentResourcePool will be hosted in a shared model server. If provided, will override replica count arguments. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + initial_replica_count (int): + Optional. The number of replicas to deploy the model with. + Only applicable for scale-to-zero deployments where + min_replica_count is 0. + min_scaleup_period (int): + Optional. For scale-to-zero deployments, minimum duration that + a deployment will be scaled up before traffic is + evaluated for potential scale-down. + idle_scaledown_period (int): + Optional. For scale-to-zero deployments, duration of no traffic + before scaling to zero. 
Raises: ValueError: if min/max replica or accelerator type are specified @@ -513,11 +665,51 @@ def _validate_deploy_args( not sum to 100, or if the provided accelerator type is invalid. """ if not deployment_resource_pool: - if not (min_replica_count and max_replica_count): + if min_replica_count is None or max_replica_count is None: raise ValueError( - "Minimum and maximum replica counts must not be " + "Minimum and maximum replica counts must not be specified" "if not using a shared resource pool." ) + # Validate STZ parameters + if min_replica_count != 0: + if initial_replica_count: + raise ValueError( + "Initial replica count cannot be set for non-STZ models." + ) + if min_scaleup_period: + raise ValueError( + "Min scaleup period cannot be set for non-STZ models." + ) + if idle_scaledown_period: + raise ValueError( + "Idle scaledown period cannot be set for non-STZ models." + ) + if min_replica_count == 0 and initial_replica_count: + if initial_replica_count < 0: + raise ValueError("Initial replica count must be at least 0.") + if initial_replica_count > max_replica_count: + raise ValueError( + "Initial replica count cannot be " + "greater than max replica count." + ) + if min_replica_count == 0 and min_scaleup_period: + if min_scaleup_period < 300: + raise ValueError( + "Min scaleup period cannot be less than 300 (5 minutes)." + ) + if min_scaleup_period > 28800: + raise ValueError( + "Min scaleup period cannot be greater than 28800 (8 hours)." + ) + if min_replica_count == 0 and idle_scaledown_period: + if idle_scaledown_period < 300: + raise ValueError( + "Idle scaledown period cannot be less than 300 (5 minutes)." + ) + if idle_scaledown_period > 28800: + raise ValueError( + "Idle scaledown period cannot be greater than 28800 (8 hours)." 
+ ) return aiplatform.Endpoint._validate_deploy_args( min_replica_count=min_replica_count, max_replica_count=max_replica_count, @@ -526,6 +718,7 @@ def _validate_deploy_args( traffic_split=traffic_split, traffic_percentage=traffic_percentage, deployment_resource_pool=deployment_resource_pool, + required_replica_count=required_replica_count, ) if ( @@ -533,6 +726,11 @@ def _validate_deploy_args( and min_replica_count != 1 or max_replica_count and max_replica_count != 1 + or required_replica_count + and required_replica_count != 0 + or initial_replica_count + or min_scaleup_period + or idle_scaledown_period ): _LOGGER.warning( "Ignoring explicitly specified replica counts, " @@ -544,6 +742,11 @@ def _validate_deploy_args( "deployment_resource_pool may not be specified at the same time" "as accelerator_type." ) + if initial_replica_count or min_scaleup_period or idle_scaledown_period: + raise ValueError( + "Scale-to-zero parameters may not be specified at the same time" + "as deployment_resource_pool." 
+ ) if traffic_split is None: if traffic_percentage > 100: raise ValueError("Traffic percentage cannot be greater than 100.") @@ -579,10 +782,23 @@ def deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, deployment_resource_pool: Optional[DeploymentResourcePool] = None, disable_container_logging: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, + rollout_options: Optional[RolloutOptions] = None, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, + initial_replica_count: Optional[int] = None, + min_scaleup_period: Optional[int] = None, + idle_scaledown_period: Optional[int] = None, ) -> None: """Deploys a Model to the Endpoint. @@ -656,6 +872,19 @@ def deploy( autoscaling_target_accelerator_duty_cycle (int): Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): Target request + count per minute per instance. + autoscaling_target_dcgm_fi_dev_gpu_util (int): Target DCGM metrics for + GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): Target vLLM metrics + for GPU KV cache usage percentage. 
+ autoscaling_target_vllm_num_requests_waiting (int): Target vLLM metrics + for number of inference requests currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): Target number + of pubsub undelivered messages per instance. + autoscaling_pubsub_subscription_labels (Dict[str, str]): Optional. + Monitored resource labels as key value pairs for metric filtering + for pubsub_num_undelivered_messages. deployment_resource_pool (DeploymentResourcePool): Optional. Resource pool where the model will be deployed. All models that are deployed to the same DeploymentResourcePool will be hosted in @@ -671,7 +900,36 @@ def deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. - + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + rollout_options (RolloutOptions): + Optional. Options to configure a rolling deployment. + multihost_gpu_node_count (int): Optional. The number of nodes per + replica for multihost GPU deployments. Required for multihost GPU + deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use DWS resource + to schedule the deployment workload. The DWS resource will be available + for a maximum of 7 days or up to the max_runtime_duration specified, + whichever is shorter. After this period, the model will be + automatically undeployed. The value is in seconds. + initial_replica_count (int): + Optional. The number of replicas to deploy the model with. 
+ Only applicable for scale-to-zero deployments where + min_replica_count is 0. + min_scaleup_period (int): + Optional. For scale-to-zero deployments, minimum duration that + a deployment will be scaled up before traffic is + evaluated for potential scale-down. + idle_scaledown_period (int): + Optional. For scale-to-zero deployments, duration of no traffic + before scaling to zero. """ self._sync_gca_resource_if_skipped() @@ -683,6 +941,10 @@ def deploy( traffic_split=traffic_split, traffic_percentage=traffic_percentage, deployment_resource_pool=deployment_resource_pool, + required_replica_count=required_replica_count, + initial_replica_count=initial_replica_count, + min_scaleup_period=min_scaleup_period, + idle_scaledown_period=idle_scaledown_period, ) explanation_spec = _explanation_utils.create_and_validate_explanation_spec( @@ -707,10 +969,23 @@ def deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, deployment_resource_pool=deployment_resource_pool, disable_container_logging=disable_container_logging, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, + rollout_options=rollout_options, + multihost_gpu_node_count=multihost_gpu_node_count, + max_runtime_duration=max_runtime_duration, + 
initial_replica_count=initial_replica_count, + min_scaleup_period=min_scaleup_period, + idle_scaledown_period=idle_scaledown_period, ) @base.optional_sync() @@ -732,10 +1007,23 @@ def _deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, deployment_resource_pool: Optional[DeploymentResourcePool] = None, disable_container_logging: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, + rollout_options: Optional[RolloutOptions] = None, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, + initial_replica_count: Optional[int] = None, + min_scaleup_period: Optional[int] = None, + idle_scaledown_period: Optional[int] = None, ) -> None: """Deploys a Model to the Endpoint. @@ -803,6 +1091,19 @@ def _deploy( autoscaling_target_accelerator_duty_cycle (int): Target Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): Target request + count per minute per instance. + autoscaling_target_dcgm_fi_dev_gpu_util (int): Target DCGM metrics for + GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): Target vLLM metrics + for GPU KV cache usage percentage. 
+ autoscaling_target_vllm_num_requests_waiting (int): Target vLLM metrics + for number of inference requests currently waiting in the queue. + autoscaling_target_pubsub_num_undelivered_messages (int): Target number + of pubsub undelivered messages per instance. + autoscaling_pubsub_subscription_labels (Dict[str, str]): Optional. + Monitored resource labels as key value pairs for metric filtering + for pubsub_num_undelivered_messages. deployment_resource_pool (DeploymentResourcePool): Optional. Resource pool where the model will be deployed. All models that are deployed to the same DeploymentResourcePool will be hosted in @@ -818,7 +1119,36 @@ def _deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. - + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + rollout_options (RolloutOptions): Optional. + Options to configure a rolling deployment. + multihost_gpu_node_count (int): Optional. The number of nodes per + replica for multihost GPU deployments. Required for multihost + GPU deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use DWS resource + to schedule the deployment workload. The DWS resource will be available + for a maximum of 7 days or up to the max_runtime_duration specified, + whichever is shorter. After this period, the model will be + automatically undeployed. The value is in seconds. + initial_replica_count (int): + Optional. The number of replicas to deploy the model with. 
+ Only applicable for scale-to-zero deployments where + min_replica_count is 0. + min_scaleup_period (int): + Optional. For scale-to-zero deployments, minimum duration that + a deployment will be scaled up before traffic is + evaluated for potential scale-down. + idle_scaledown_period (int): + Optional. For scale-to-zero deployments, duration of no traffic + before scaling to zero. """ _LOGGER.log_action_start_against_resource( f"Deploying Model {model.resource_name} to", "", self @@ -844,10 +1174,23 @@ def _deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, deployment_resource_pool=deployment_resource_pool, disable_container_logging=disable_container_logging, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, + rollout_options=rollout_options, + multihost_gpu_node_count=multihost_gpu_node_count, + max_runtime_duration=max_runtime_duration, + initial_replica_count=initial_replica_count, + min_scaleup_period=min_scaleup_period, + idle_scaledown_period=idle_scaledown_period, ) _LOGGER.log_action_completed_against_resource("model", "deployed", self) @@ -876,10 +1219,23 @@ def _deploy_call( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, 
autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, deployment_resource_pool: Optional[DeploymentResourcePool] = None, disable_container_logging: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, + rollout_options: Optional[RolloutOptions] = None, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, + initial_replica_count: Optional[int] = None, + min_scaleup_period: Optional[int] = None, + idle_scaledown_period: Optional[int] = None, ) -> None: """Helper method to deploy model to endpoint. @@ -954,6 +1310,20 @@ def _deploy_call( Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): Optional. Target + request count per minute per instance. + autoscaling_target_dcgm_fi_dev_gpu_util (int): Optional. Target DCGM + metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): Optional. Target + vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): Optional. Target + vLLM metrics for number of inference requests currently waiting in the + queue. + autoscaling_target_pubsub_num_undelivered_messages (int): Optional. + Target number of pubsub undelivered messages per instance. + autoscaling_pubsub_subscription_labels (Dict[str, str]): Optional. 
+ Monitored resource labels as key value pairs for metric filtering + for pubsub_num_undelivered_messages. deployment_resource_pool (DeploymentResourcePool): Optional. Resource pool where the model will be deployed. All models that are deployed to the same DeploymentResourcePool will be hosted in @@ -969,6 +1339,33 @@ def _deploy_call( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + rollout_options (RolloutOptions): Optional. Options to configure a + rolling deployment. + multihost_gpu_node_count (int): + Optional. The number of nodes per replica for multihost GPU + deployments. Required for multihost GPU deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use DWS resource + to schedule the deployment workload. The DWS resource will be available + for a maximum of 7 days or up to the max_runtime_duration specified, + whichever is shorter. After this period, the model will be + automatically undeployed. The value is in seconds. + initial_replica_count (int): Optional. The number of replicas to + deploy the model with. Only applicable for scale-to-zero + deployments where min_replica_count is 0. + min_scaleup_period (int): Optional. For scale-to-zero deployments, + minimum duration that a deployment will be scaled up before traffic + is evaluated for potential scale-down. + idle_scaledown_period (int): Optional. For scale-to-zero deployments, + duration of no traffic before scaling to zero. 
Raises: ValueError: If only `accelerator_type` or `accelerator_count` is @@ -1020,7 +1417,13 @@ def _deploy_call( or accelerator_type or accelerator_count or autoscaling_target_accelerator_duty_cycle + or autoscaling_target_request_count_per_minute or autoscaling_target_cpu_utilization + or autoscaling_target_dcgm_fi_dev_gpu_util + or autoscaling_target_vllm_gpu_cache_usage_perc + or autoscaling_target_vllm_num_requests_waiting + or autoscaling_target_pubsub_num_undelivered_messages + or autoscaling_pubsub_subscription_labels ) # If the model supports both automatic and dedicated deployment resources, @@ -1032,23 +1435,54 @@ def _deploy_call( if provided_custom_machine_spec and not use_dedicated_resources: _LOGGER.info( "Model does not support dedicated deployment resources. " - "The machine_type, accelerator_type and accelerator_count," - "autoscaling_target_accelerator_duty_cycle," - "autoscaling_target_cpu_utilization parameters are ignored." + "The machine_type, accelerator_type and accelerator_count, " + "autoscaling_target_accelerator_duty_cycle, " + "autoscaling_target_cpu_utilization, " + "autoscaling_target_request_count_per_minute, " + "autoscaling_target_dcgm_fi_dev_gpu_util, " + "autoscaling_target_vllm_gpu_cache_usage_perc, " + "autoscaling_target_vllm_num_requests_waiting, " + "autoscaling_target_pubsub_num_undelivered_messages, " + "autoscaling_pubsub_subscription_labels parameters " + "are ignored." ) if use_dedicated_resources and not machine_type: machine_type = _DEFAULT_MACHINE_TYPE _LOGGER.info(f"Using default machine_type: {machine_type}") - if use_dedicated_resources: + if use_dedicated_resources and not rollout_options: dedicated_resources = gca_machine_resources_compat.DedicatedResources( min_replica_count=min_replica_count, max_replica_count=max_replica_count, + required_replica_count=required_replica_count, + ) + + # If min_replica_count is 0, set Scale to Zero fields. 
+ if dedicated_resources.min_replica_count == 0: + # Set initial replica count + dedicated_resources.initial_replica_count = initial_replica_count + # Set scale to zero spec. + stz_spec = ( + gca_machine_resources_compat.DedicatedResources.ScaleToZeroSpec() + ) + if min_scaleup_period is not None: + stz_spec.min_scaleup_period = duration_pb2.Duration( + seconds=min_scaleup_period + ) + if idle_scaledown_period is not None: + stz_spec.idle_scaledown_period = duration_pb2.Duration( + seconds=idle_scaledown_period + ) + dedicated_resources.scale_to_zero_spec = stz_spec + + prediction_utils.add_flex_start_to_dedicated_resources( + dedicated_resources, max_runtime_duration ) machine_spec = gca_machine_resources_compat.MachineSpec( - machine_type=machine_type + machine_type=machine_type, + multihost_gpu_node_count=multihost_gpu_node_count, ) if autoscaling_target_cpu_utilization: @@ -1074,6 +1508,73 @@ def _deploy_call( [autoscaling_metric_spec] ) + if autoscaling_target_request_count_per_minute: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "aiplatform.googleapis.com/prediction/online/" + "request_count" + ), + target=autoscaling_target_request_count_per_minute, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_dcgm_fi_dev_gpu_util: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "prometheus.googleapis.com/vertex_dcgm_fi_dev_gpu_util" + ), + target=autoscaling_target_dcgm_fi_dev_gpu_util, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_vllm_gpu_cache_usage_perc: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "prometheus.googleapis.com/" + "vertex_vllm_gpu_cache_usage_perc" + ), + target=autoscaling_target_vllm_gpu_cache_usage_perc, + ) + ) + 
dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_vllm_num_requests_waiting: + autoscaling_metric_spec = ( + gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "prometheus.googleapis.com/" + "vertex_vllm_num_requests_waiting" + ), + target=autoscaling_target_vllm_num_requests_waiting, + ) + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + + if autoscaling_target_pubsub_num_undelivered_messages: + autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec( + metric_name=( + "pubsub.googleapis.com/subscription/num_undelivered_messages" + ), + target=autoscaling_target_pubsub_num_undelivered_messages, + monitored_resource_labels=autoscaling_pubsub_subscription_labels, + ) + dedicated_resources.autoscaling_metric_specs.extend( + [autoscaling_metric_spec] + ) + dedicated_resources.machine_spec = machine_spec # Checking if flag fast_tryout_enabled is set, only in v1beta1 @@ -1083,6 +1584,15 @@ def _deploy_call( ) ) deployed_model.dedicated_resources = dedicated_resources + elif rollout_options: + deployed_model.rollout_options = rollout_options.to_gapic() + elif supports_automatic_resources: + deployed_model.automatic_resources = ( + gca_machine_resources_compat.AutomaticResources( + min_replica_count=min_replica_count, + max_replica_count=max_replica_count, + ) + ) else: deployed_model = gca_endpoint_compat.DeployedModel( model=model.versioned_resource_name, @@ -1111,15 +1621,28 @@ def _deploy_call( or accelerator_count or autoscaling_target_accelerator_duty_cycle or autoscaling_target_cpu_utilization + or autoscaling_target_request_count_per_minute + or autoscaling_target_dcgm_fi_dev_gpu_util + or autoscaling_target_vllm_gpu_cache_usage_perc + or autoscaling_target_vllm_num_requests_waiting + or autoscaling_target_pubsub_num_undelivered_messages + or autoscaling_pubsub_subscription_labels ) if provided_custom_machine_spec: raise 
ValueError( "Conflicting parameters in deployment request. " - "The machine_type, accelerator_type and accelerator_count," - "autoscaling_target_accelerator_duty_cycle," - "autoscaling_target_cpu_utilization parameters may not be set " - "when `deployment_resource_pool` is specified." + "The machine_type, accelerator_type and accelerator_count, " + "autoscaling_target_accelerator_duty_cycle, " + "autoscaling_target_cpu_utilization, " + "autoscaling_target_request_count_per_minute, " + "autoscaling_target_dcgm_fi_dev_gpu_util, " + "autoscaling_target_vllm_gpu_cache_usage_perc, " + "autoscaling_target_vllm_num_requests_waiting, " + "autoscaling_target_pubsub_num_undelivered_messages, " + "autoscaling_pubsub_subscription_labels parameters " + "may not be set when `deployment_resource_pool` is " + "specified." ) deployed_model.shared_resources = deployment_resource_pool.resource_name @@ -1376,10 +1899,23 @@ def deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, deployment_resource_pool: Optional[DeploymentResourcePool] = None, disable_container_logging: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, + rollout_options: Optional[RolloutOptions] = None, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, + initial_replica_count: Optional[int] = None, + min_scaleup_period: Optional[int] = None, 
+ idle_scaledown_period: Optional[int] = None, ) -> Union[Endpoint, models.PrivateEndpoint]: """Deploys model to endpoint. @@ -1474,6 +2010,20 @@ def deploy( Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): Optional. Target + request count per minute per instance. + autoscaling_target_dcgm_fi_dev_gpu_util (int): Optional. Target DCGM + metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): Optional. Target + vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): Optional. Target + vLLM metrics for number of inference requests currently waiting in the + queue. + autoscaling_target_pubsub_num_undelivered_messages (int): Optional. Target + number of pubsub undelivered messages per instance. + autoscaling_pubsub_subscription_labels (Dict[str, str]): Optional. + Monitored resource labels as key value pairs for metric filtering + for pubsub_num_undelivered_messages. deployment_resource_pool (DeploymentResourcePool): Optional. Resource pool where the model will be deployed. All models that are deployed to the same DeploymentResourcePool will be hosted in @@ -1489,7 +2039,33 @@ def deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. - + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + rollout_options (RolloutOptions): + Optional. 
Options to configure a rolling deployment. + multihost_gpu_node_count (int): + Optional. The number of nodes per replica for multihost GPU + deployments. Required for multihost GPU deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use DWS resource + to schedule the deployment workload. The DWS resource will be available + for a maximum of 7 days or up to the max_runtime_duration specified, + whichever is shorter. After this period, the model will be + automatically undeployed. The value is in seconds. + initial_replica_count (int): Optional. The number of replicas to + deploy the model with. Only applicable for scale-to-zero + deployments where min_replica_count is 0. + min_scaleup_period (int): Optional. For scale-to-zero deployments, + minimum duration that a deployment will be scaled up before traffic + is evaluated for potential scale-down. + idle_scaledown_period (int): Optional. For scale-to-zero deployments, + duration of no traffic before scaling to zero. Returns: endpoint (Union[Endpoint, models.PrivateEndpoint]): Endpoint with the deployed model. 
@@ -1507,6 +2083,10 @@ def deploy( traffic_split=traffic_split, traffic_percentage=traffic_percentage, deployment_resource_pool=deployment_resource_pool, + required_replica_count=required_replica_count, + initial_replica_count=initial_replica_count, + min_scaleup_period=min_scaleup_period, + idle_scaledown_period=idle_scaledown_period, ) if isinstance(endpoint, models.PrivateEndpoint): @@ -1535,17 +2115,30 @@ def deploy( service_account=service_account, explanation_spec=explanation_spec, metadata=metadata, - encryption_spec_key_name=encryption_spec_key_name + encryption_spec_key_name=encryption_spec_key_name, + initial_replica_count=initial_replica_count, + min_scaleup_period=min_scaleup_period, + idle_scaledown_period=idle_scaledown_period or initializer.global_config.encryption_spec_key_name, network=network, sync=sync, deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, deployment_resource_pool=deployment_resource_pool, disable_container_logging=disable_container_logging, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, + rollout_options=rollout_options, + multihost_gpu_node_count=multihost_gpu_node_count, + max_runtime_duration=max_runtime_duration, ) def _should_enable_dedicated_endpoint(self, 
fast_tryout_enabled: bool) -> bool: @@ -1576,10 +2169,23 @@ def _deploy( deploy_request_timeout: Optional[float] = None, autoscaling_target_cpu_utilization: Optional[int] = None, autoscaling_target_accelerator_duty_cycle: Optional[int] = None, + autoscaling_target_request_count_per_minute: Optional[int] = None, + autoscaling_target_dcgm_fi_dev_gpu_util: Optional[int] = None, + autoscaling_target_vllm_gpu_cache_usage_perc: Optional[int] = None, + autoscaling_target_vllm_num_requests_waiting: Optional[int] = None, + autoscaling_target_pubsub_num_undelivered_messages: Optional[int] = None, + autoscaling_pubsub_subscription_labels: Optional[Dict[str, str]] = None, deployment_resource_pool: Optional[DeploymentResourcePool] = None, disable_container_logging: bool = False, fast_tryout_enabled: bool = False, system_labels: Optional[Dict[str, str]] = None, + required_replica_count: Optional[int] = 0, + rollout_options: Optional[RolloutOptions] = None, + multihost_gpu_node_count: Optional[int] = None, + max_runtime_duration: Optional[int] = None, + initial_replica_count: Optional[int] = None, + min_scaleup_period: Optional[int] = None, + idle_scaledown_period: Optional[int] = None, ) -> Union[Endpoint, models.PrivateEndpoint]: """Deploys model to endpoint. @@ -1666,6 +2272,20 @@ def _deploy( Accelerator Duty Cycle. Must also set accelerator_type and accelerator_count if specified. A default value of 60 will be used if not specified. + autoscaling_target_request_count_per_minute (int): Optional. Target + request count per minute per instance. + autoscaling_target_dcgm_fi_dev_gpu_util (int): Optional. Target DCGM + metrics for GPU utilization. + autoscaling_target_vllm_gpu_cache_usage_perc (int): Optional. Target + vLLM metrics for GPU KV cache usage percentage. + autoscaling_target_vllm_num_requests_waiting (int): Optional. Target + vLLM metrics for number of inference requests currently waiting in the + queue. 
+ autoscaling_target_pubsub_num_undelivered_messages (int): Optional. Target + number of pubsub undelivered messages per instance. + autoscaling_pubsub_subscription_labels (Dict[str, str]): Optional. + Monitored resource labels as key value pairs for metric filtering + for pubsub_num_undelivered_messages. deployment_resource_pool (DeploymentResourcePool): Optional. Resource pool where the model will be deployed. All models that are deployed to the same DeploymentResourcePool will be hosted in @@ -1680,7 +2300,36 @@ def _deploy( system_labels (Dict[str, str]): Optional. System labels to apply to Model Garden deployments. System labels are managed by Google for internal use only. - + required_replica_count (int): + Optional. Number of required available replicas for the + deployment to succeed. This field is only needed when partial + model deployment/mutation is desired, with a value greater than + or equal to 1 and fewer than or equal to min_replica_count. If + set, the model deploy/mutate operation will succeed once + available_replica_count reaches required_replica_count, and the + rest of the replicas will be retried. + rollout_options (RolloutOptions): + Optional. Options to configure a rolling deployment. + multihost_gpu_node_count (int): + Optional. The number of nodes per replica for multihost GPU + deployments. Required for multihost GPU deployments. + max_runtime_duration (int): + Optional. Immutable. If set, use DWS resource + to schedule the deployment workload. The DWS resource will be available + for a maximum of 7 days or up to the max_runtime_duration specified, + whichever is shorter. After this period, the model will be + automatically undeployed. The value is in seconds. + initial_replica_count (int): + Optional. The number of replicas to deploy the model with. + Only applicable for scale-to-zero deployments where + min_replica_count is 0. + min_scaleup_period (int): + Optional. 
For scale-to-zero deployments, minimum duration that + a deployment will be scaled up before traffic is + evaluated for potential scale-down. + idle_scaledown_period (int): + Optional. For scale-to-zero deployments, duration of no traffic + before scaling to zero. Returns: endpoint (Union[Endpoint, models.PrivateEndpoint]): Endpoint with the deployed model. @@ -1688,6 +2337,10 @@ def _deploy( if endpoint is None: display_name = self.display_name[:118] + "_endpoint" + if rollout_options is not None: + raise ValueError( + "Rollout options may only be used when deploying to an existing endpoint." + ) if not network: endpoint = Endpoint.create( @@ -1709,6 +2362,10 @@ def _deploy( credentials=self.credentials, encryption_spec_key_name=encryption_spec_key_name, ) + if isinstance(endpoint, Endpoint): + preview_kwargs = {"rollout_options": rollout_options} + else: + preview_kwargs = {} _LOGGER.log_action_start_against_resource("Deploying model to", "", endpoint) @@ -1732,10 +2389,23 @@ def _deploy( deploy_request_timeout=deploy_request_timeout, autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization, autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle, + autoscaling_target_request_count_per_minute=autoscaling_target_request_count_per_minute, + autoscaling_target_dcgm_fi_dev_gpu_util=autoscaling_target_dcgm_fi_dev_gpu_util, + autoscaling_target_vllm_gpu_cache_usage_perc=autoscaling_target_vllm_gpu_cache_usage_perc, + autoscaling_target_vllm_num_requests_waiting=autoscaling_target_vllm_num_requests_waiting, + autoscaling_target_pubsub_num_undelivered_messages=autoscaling_target_pubsub_num_undelivered_messages, + autoscaling_pubsub_subscription_labels=autoscaling_pubsub_subscription_labels, deployment_resource_pool=deployment_resource_pool, disable_container_logging=disable_container_logging, fast_tryout_enabled=fast_tryout_enabled, system_labels=system_labels, + required_replica_count=required_replica_count, + 
multihost_gpu_node_count=multihost_gpu_node_count, + max_runtime_duration=max_runtime_duration, + initial_replica_count=initial_replica_count, + min_scaleup_period=min_scaleup_period, + idle_scaledown_period=idle_scaledown_period, + **preview_kwargs, ) _LOGGER.log_action_completed_against_resource("model", "deployed", endpoint) diff --git a/google/cloud/aiplatform/preview/persistent_resource.py b/google/cloud/aiplatform/preview/persistent_resource.py index e06b08e90b..54a9d55d3d 100644 --- a/google/cloud/aiplatform/preview/persistent_resource.py +++ b/google/cloud/aiplatform/preview/persistent_resource.py @@ -177,6 +177,7 @@ def create( labels: Optional[Dict[str, str]] = None, network: Optional[str] = None, kms_key_name: Optional[str] = None, + enable_custom_service_account: Optional[bool] = None, service_account: Optional[str] = None, reserved_ip_ranges: List[str] = None, sync: Optional[bool] = True, # pylint: disable=unused-argument @@ -240,6 +241,16 @@ def create( PersistentResource. If set, this PersistentResource and all sub-resources of this PersistentResource will be secured by this key. + enable_custom_service_account (bool): + Optional. When set to True, allows the `service_account` + parameter to specify a custom service account for workloads on this + PersistentResource. Defaults to None (False behavior). + + If True, the service account provided in the `service_account` parameter + will be used for workloads (runtimes, jobs), provided the user has the + ``iam.serviceAccounts.actAs`` permission. If False, the + `service_account` parameter is ignored, and the PersistentResource + will use the default service account. service_account (str): Optional. Default service account that this PersistentResource's workloads run as. 
The workloads @@ -301,7 +312,29 @@ def create( gca_encryption_spec_compat.EncryptionSpec(kms_key_name=kms_key_name) ) - if service_account: + if ( + enable_custom_service_account is False and service_account is not None + ): # pylint: disable=g-bool-id-comparison + raise ValueError( + "The parameter `enable_custom_service_account` was set to False, " + "but a value was provided for `service_account`. These two " + "settings are incompatible. If you want to use a custom " + "service account, set `enable_custom_service_account` to True." + ) + + elif enable_custom_service_account: + service_account_spec = gca_persistent_resource_compat.ServiceAccountSpec( + enable_custom_service_account=True, + # Set service_account if it is provided, otherwise set to None + service_account=service_account if service_account else None, + ) + gca_persistent_resource.resource_runtime_spec = ( + gca_persistent_resource_compat.ResourceRuntimeSpec( + service_account_spec=service_account_spec + ) + ) + elif service_account: + # Handle the deprecated case where only service_account is provided service_account_spec = gca_persistent_resource_compat.ServiceAccountSpec( enable_custom_service_account=True, service_account=service_account ) diff --git a/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py b/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py index d45792b081..b0eb7293e4 100644 --- a/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py +++ b/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py @@ -295,6 +295,7 @@ def create_schedule( allow_queueing: bool = False, max_run_count: Optional[int] = None, max_concurrent_run_count: int = 1, + max_concurrent_active_run_count: Optional[int] = None, service_account: Optional[str] = None, network: Optional[str] = None, create_request_timeout: Optional[float] = None, @@ -335,6 +336,10 @@ def create_schedule( Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. 
Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). service_account (str): Optional. Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. @@ -358,6 +363,7 @@ def create_schedule( allow_queueing=allow_queueing, max_run_count=max_run_count, max_concurrent_run_count=max_concurrent_run_count, + max_concurrent_active_run_count=max_concurrent_active_run_count, service_account=service_account, network=network, create_request_timeout=create_request_timeout, @@ -578,12 +584,34 @@ def rerun( pipeline_job.original_pipeline_job_id = int( original_pipeline_job.labels["vertex-ai-pipelines-run-billing-id"] ) + original_pipeline_task_details = ( + original_pipeline_job.job_detail.task_details + ) except Exception as e: raise ValueError( f"Failed to get original pipeline job: {original_pipelinejob_name}" ) from e - pipeline_job.pipeline_task_rerun_configs = pipeline_task_rerun_configs + task_id_to_task_rerun_config = {} + for task_rerun_config in pipeline_task_rerun_configs: + task_id_to_task_rerun_config[task_rerun_config.task_id] = task_rerun_config + + pipeline_job.pipeline_task_rerun_configs = [] + for task_detail in original_pipeline_task_details: + if task_detail.task_id in task_id_to_task_rerun_config: + task_rerun_config = task_id_to_task_rerun_config[task_detail.task_id] + if task_detail.task_unique_name: + task_rerun_config.task_name = task_detail.task_unique_name + pipeline_job.pipeline_task_rerun_configs.append(task_rerun_config) + else: + pipeline_job.pipeline_task_rerun_configs.append( + aiplatform_v1beta1.PipelineTaskRerunConfig( + task_id=task_detail.task_id, + task_name=task_detail.task_unique_name, + 
skip_task=task_detail.state + == aiplatform_v1beta1.PipelineTaskDetail.State.SUCCEEDED, + ) + ) if parameter_values: runtime_config = self._v1_beta1_pipeline_job.runtime_config diff --git a/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py b/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py index 2c119c4922..02d0f97434 100644 --- a/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py +++ b/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py @@ -75,6 +75,7 @@ def create( allow_queueing: bool = False, max_run_count: Optional[int] = None, max_concurrent_run_count: int = 1, + max_concurrent_active_run_count: Optional[int] = None, service_account: Optional[str] = None, network: Optional[str] = None, create_request_timeout: Optional[float] = None, @@ -101,6 +102,10 @@ def create( Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). service_account (str): Optional. Specifies the service account for workload run-as account. Users submitting jobs must have act-as permission on this run-as account. 
@@ -120,6 +125,7 @@ def create( allow_queueing=allow_queueing, max_run_count=max_run_count, max_concurrent_run_count=max_concurrent_run_count, + max_concurrent_active_run_count=max_concurrent_active_run_count, service_account=service_account, network=network, create_request_timeout=create_request_timeout, @@ -190,6 +196,7 @@ def update( allow_queueing: Optional[bool] = None, max_run_count: Optional[int] = None, max_concurrent_run_count: Optional[int] = None, + max_concurrent_active_run_count: Optional[int] = None, ) -> None: """Update an existing PipelineJobSchedule. @@ -222,6 +229,10 @@ def update( Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. + max_concurrent_active_run_count (int): + Optional. Maximum number of active runs that can be executed + concurrently for this PipelineJobSchedule. Active runs are those + in a non-terminal state (e.g., RUNNING, PENDING, or QUEUED). Raises: RuntimeError: User tried to call update() before create(). @@ -234,4 +245,5 @@ def update( allow_queueing=allow_queueing, max_run_count=max_run_count, max_concurrent_run_count=max_concurrent_run_count, + max_concurrent_active_run_count=max_concurrent_active_run_count, ) diff --git a/google/cloud/aiplatform/preview/vertex_ray/__init__.py b/google/cloud/aiplatform/preview/vertex_ray/__init__.py index 8e58f0e7da..af3c6e24d8 100644 --- a/google/cloud/aiplatform/preview/vertex_ray/__init__.py +++ b/google/cloud/aiplatform/preview/vertex_ray/__init__.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ import sys from google.cloud.aiplatform.vertex_ray.bigquery_datasource import ( - BigQueryDatasource, + _BigQueryDatasource, ) from google.cloud.aiplatform.vertex_ray.client_builder import ( VertexRayClientBuilder as ClientBuilder, @@ -44,13 +44,13 @@ get_job_submission_client_cluster_info, ) -if sys.version_info[1] != 10: +if sys.version_info[1] not in (10, 11): print( - "[Ray on Vertex]: The client environment with Python version 3.10 is required." + "[Ray on Vertex]: The client environment with Python version 3.10 or 3.11 is required." ) __all__ = ( - "BigQueryDatasource", + "_BigQueryDatasource", "data", "ClientBuilder", "get_job_submission_client_cluster_info", diff --git a/google/cloud/aiplatform/releases.txt b/google/cloud/aiplatform/releases.txt index ad3b915fdf..273b510917 100644 --- a/google/cloud/aiplatform/releases.txt +++ b/google/cloud/aiplatform/releases.txt @@ -1,4 +1,4 @@ Use this file when you need to force a patch release with release-please. Edit line 4 below with the version for the release. -1.55.0 \ No newline at end of file +1.148.0 \ No newline at end of file diff --git a/google/cloud/aiplatform/schedules.py b/google/cloud/aiplatform/schedules.py index 2083a60dbc..6ea72d4ce7 100644 --- a/google/cloud/aiplatform/schedules.py +++ b/google/cloud/aiplatform/schedules.py @@ -186,6 +186,16 @@ def max_concurrent_run_count(self) -> int: self._sync_gca_resource() return self._gca_resource.max_concurrent_run_count + @property + def max_concurrent_active_run_count(self) -> int: + """Current Schedule max_concurrent_active_run_count. + + Returns: + Schedule max_concurrent_active_run_count. + """ + self._sync_gca_resource() + return self._gca_resource.max_concurrent_active_run_count + @property def allow_queueing(self) -> bool: """Whether current Schedule allows queueing. 
diff --git a/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py b/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py index cb8f119172..25ca6fe9b1 100644 --- a/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py +++ b/google/cloud/aiplatform/tensorboard/plugins/tf_profiler/profile_uploader.py @@ -24,7 +24,9 @@ from typing import DefaultDict, Dict, Generator, List, Optional, Set, Tuple from google.cloud import storage -from google.cloud.aiplatform.compat.services import tensorboard_service_client +from google.cloud.aiplatform.compat.services import ( + tensorboard_service_client, +) from google.cloud.aiplatform.compat.types import tensorboard_data from google.cloud.aiplatform.compat.types import tensorboard_service from google.cloud.aiplatform.compat.types import tensorboard_time_series @@ -144,7 +146,7 @@ def _is_valid_event(self, run_name: str) -> bool: return tf.io.gfile.isdir(self._profile_dir(run_name)) def _profile_dir(self, run_name: str) -> str: - """Converts run name to full profile path. + """Converts run name to a full, validated profile path under `logdir`. Args: run_name (str): @@ -153,9 +155,23 @@ def _profile_dir(self, run_name: str) -> str: Returns: Full path for run name. """ + logger.debug("_profile_dir run_name: %s", run_name) + base = os.path.realpath(self._logdir) if run_name is None or run_name == uploader_utils.DEFAULT_PROFILE_RUN_NAME: - return os.path.join(self._logdir, self.PROFILE_PATH) - return os.path.join(self._logdir, run_name, self.PROFILE_PATH) + candidate = os.path.join(base, self.PROFILE_PATH) + else: + # Normalize and forbid absolute/traversal components in `run_name`. + if os.path.isabs(run_name): + raise ValueError("run_name must be a relative name") + # Collapse dot segments then strip leading separators + safe_name = os.path.normpath(run_name).lstrip("/\\") + if safe_name.startswith("..") or (".." 
+ os.sep) in safe_name: + raise ValueError("run_name must not contain path traversal") + candidate = os.path.join(base, safe_name, self.PROFILE_PATH) + resolved = os.path.realpath(candidate) + if os.path.commonpath([base, resolved]) != base: + raise ValueError("profile path escapes logdir") + return resolved def send_request(self, run_name: str): """Accepts run_name and sends an RPC request if an event is detected. @@ -182,9 +198,9 @@ def send_request(self, run_name: str): ) if run_name not in self._run_to_file_request_sender: - self._run_to_file_request_sender[ - run_name - ] = self._file_request_sender_factory(tb_run) + self._run_to_file_request_sender[run_name] = ( + self._file_request_sender_factory(tb_run) + ) # Loop through any of the profiling sessions within this training run. # A training run can have multiple profile sessions. diff --git a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py index 302df9614d..87f840b3b2 100644 --- a/google/cloud/aiplatform/tensorboard/tensorboard_resource.py +++ b/google/cloud/aiplatform/tensorboard/tensorboard_resource.py @@ -388,9 +388,11 @@ def __init__( ) self._gca_resource = self._get_gca_resource( resource_name=tensorboard_experiment_name, - parent_resource_name_fields={Tensorboard._resource_noun: tensorboard_id} - if tensorboard_id - else tensorboard_id, + parent_resource_name_fields=( + {Tensorboard._resource_noun: tensorboard_id} + if tensorboard_id + else tensorboard_id + ), ) @classmethod @@ -646,12 +648,14 @@ def __init__( ) self._gca_resource = self._get_gca_resource( resource_name=tensorboard_run_name, - parent_resource_name_fields={ - Tensorboard._resource_noun: tensorboard_id, - TensorboardExperiment._resource_noun: tensorboard_experiment_id, - } - if tensorboard_id - else tensorboard_id, + parent_resource_name_fields=( + { + Tensorboard._resource_noun: tensorboard_id, + TensorboardExperiment._resource_noun: tensorboard_experiment_id, + } 
+ if tensorboard_id + else tensorboard_id + ), ) self._time_series_display_name_to_id_mapping = ( @@ -991,9 +995,9 @@ def create_tensorboard_time_series( credentials=self.credentials, ) - self._time_series_display_name_to_id_mapping[ - tb_time_series.display_name - ] = tb_time_series.name + self._time_series_display_name_to_id_mapping[tb_time_series.display_name] = ( + tb_time_series.name + ) return tb_time_series @@ -1128,13 +1132,15 @@ def __init__( ) self._gca_resource = self._get_gca_resource( resource_name=tensorboard_time_series_name, - parent_resource_name_fields={ - Tensorboard._resource_noun: tensorboard_id, - TensorboardExperiment._resource_noun: tensorboard_experiment_id, - TensorboardRun._resource_noun: tensorboard_run_id, - } - if tensorboard_id - else tensorboard_id, + parent_resource_name_fields=( + { + Tensorboard._resource_noun: tensorboard_id, + TensorboardExperiment._resource_noun: tensorboard_experiment_id, + TensorboardRun._resource_noun: tensorboard_run_id, + } + if tensorboard_id + else tensorboard_id + ), ) @classmethod diff --git a/google/cloud/aiplatform/tensorboard/uploader.py b/google/cloud/aiplatform/tensorboard/uploader.py index b77a176004..4f07dab93f 100644 --- a/google/cloud/aiplatform/tensorboard/uploader.py +++ b/google/cloud/aiplatform/tensorboard/uploader.py @@ -25,6 +25,7 @@ from typing import ContextManager, Dict, FrozenSet, Generator, Iterable, Optional, Tuple import uuid +from google.api_core import exceptions from google.cloud import storage from google.cloud.aiplatform import base from google.cloud.aiplatform.compat.services import ( @@ -323,11 +324,16 @@ def _end_experiment_runs(self): for run_name in self._experiment_runs: if run_name: logging.info("Ending run %s", run_name) - run = experiment_run_resource.ExperimentRun.get( - project=self._project, location=self._location, run_name=run_name - ) - if run: - run.update_state(state=gca_execution.Execution.State.COMPLETE) + try: + run = 
experiment_run_resource.ExperimentRun.get( + project=self._project, + location=self._location, + run_name=run_name, + ) + if run: + run.update_state(state=gca_execution.Execution.State.COMPLETE) + except exceptions.MethodNotImplemented: + logging.warning("Failed to end run %s", run_name) def start_uploading(self): """Blocks forever to continuously upload data from the logdir. @@ -380,7 +386,7 @@ def _pre_create_runs_and_time_series(self): run_names = [] run_tag_name_to_time_series_proto = {} - for (run_name, events) in run_to_events.items(): + for run_name, events in run_to_events.items(): run_name = ( run_name if (run_name and run_name != ".") @@ -409,13 +415,13 @@ def _pre_create_runs_and_time_series(self): tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE ) - run_tag_name_to_time_series_proto[ - (run_name, value.tag) - ] = tensorboard_time_series.TensorboardTimeSeries( - display_name=value.tag, - value_type=value_type, - plugin_name=metadata.plugin_data.plugin_name, - plugin_data=metadata.plugin_data.content, + run_tag_name_to_time_series_proto[(run_name, value.tag)] = ( + tensorboard_time_series.TensorboardTimeSeries( + display_name=value.tag, + value_type=value_type, + plugin_name=metadata.plugin_data.plugin_name, + plugin_data=metadata.plugin_data.content, + ) ) experiment_runs = [uploader_utils.reformat_run_name(run) for run in run_names] @@ -711,7 +717,7 @@ def dispatch_requests( run_to_events: Mapping from run name to generator of `tf.compat.v1.Event` values, as returned by `LogdirLoader.get_run_events`. 
""" - for (run_name, events) in run_to_events.items(): + for run_name, events in run_to_events.items(): self._dispatch_additional_senders(run_name) if events is not None: for event in events: @@ -1396,7 +1402,7 @@ def _prune_empty_time_series( request: tensorboard_service.WriteTensorboardRunDataRequest, ): """Removes empty time_series from request.""" - for (time_series_idx, time_series_data) in reversed( + for time_series_idx, time_series_data in reversed( list(enumerate(request.time_series_data)) ): if not time_series_data.values: diff --git a/google/cloud/aiplatform/tensorboard/uploader_constants.py b/google/cloud/aiplatform/tensorboard/uploader_constants.py index 6c82210f82..16e3c3672a 100644 --- a/google/cloud/aiplatform/tensorboard/uploader_constants.py +++ b/google/cloud/aiplatform/tensorboard/uploader_constants.py @@ -2,29 +2,25 @@ import dataclasses -from tensorboard.plugins.distribution import ( - metadata as distribution_metadata, -) -from tensorboard.plugins.graph import metadata as graphs_metadata -from tensorboard.plugins.histogram import ( - metadata as histogram_metadata, -) -from tensorboard.plugins.hparams import metadata as hparams_metadata -from tensorboard.plugins.image import metadata as images_metadata -from tensorboard.plugins.scalar import metadata as scalar_metadata -from tensorboard.plugins.text import metadata as text_metadata -from tensorboard_plugin_profile import profile_plugin - +from tensorboard.plugins.distribution import distributions_plugin +from tensorboard.plugins.graph import graphs_plugin +from tensorboard.plugins.histogram import histograms_plugin +from tensorboard.plugins.hparams import hparams_plugin +from tensorboard.plugins.image import images_plugin +from tensorboard.plugins.scalar import scalars_plugin +from tensorboard.plugins.text import text_plugin + +PROFILE_PLUGIN_NAME = "profile" ALLOWED_PLUGINS = frozenset( [ - scalar_metadata.PLUGIN_NAME, - histogram_metadata.PLUGIN_NAME, - distribution_metadata.PLUGIN_NAME, - 
text_metadata.PLUGIN_NAME, - hparams_metadata.PLUGIN_NAME, - images_metadata.PLUGIN_NAME, - graphs_metadata.PLUGIN_NAME, - profile_plugin.PLUGIN_NAME, + scalars_plugin.ScalarsPlugin.plugin_name, + histograms_plugin.HistogramsPlugin.plugin_name, + distributions_plugin.DistributionsPlugin.plugin_name, + text_plugin.TextPlugin.plugin_name, + hparams_plugin.HParamsPlugin.plugin_name, + images_plugin.ImagesPlugin.plugin_name, + graphs_plugin.GraphsPlugin.plugin_name, + PROFILE_PLUGIN_NAME, ] ) diff --git a/google/cloud/aiplatform/tensorboard/uploader_utils.py b/google/cloud/aiplatform/tensorboard/uploader_utils.py index e534220755..1f5ddc639c 100644 --- a/google/cloud/aiplatform/tensorboard/uploader_utils.py +++ b/google/cloud/aiplatform/tensorboard/uploader_utils.py @@ -277,9 +277,9 @@ def get_time_series_resource_name( tag_name, time_series_resource_creator, ) - self._run_tag_name_to_time_series_name[ - (run_name, tag_name) - ] = time_series.name + self._run_tag_name_to_time_series_name[(run_name, tag_name)] = ( + time_series.name + ) return self._run_tag_name_to_time_series_name[(run_name, tag_name)] def _create_or_get_time_series( diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 0669876a4c..71dce9e3f5 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -45,6 +45,7 @@ training_pipeline as gca_training_pipeline, study as gca_study_compat, custom_job as gca_custom_job_compat, + service_networking as gca_service_networking, ) from google.cloud.aiplatform.utils import _timestamped_gcs_dir @@ -1553,6 +1554,9 @@ def _prepare_training_task_inputs_and_output_dir( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Tuple[Dict, str]: """Prepares training task inputs 
and output directory for custom job. @@ -1617,6 +1621,8 @@ def _prepare_training_task_inputs_and_output_dir( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. The PSC interface config for the job. Returns: Training task inputs and Output directory for custom job. """ @@ -1645,6 +1651,8 @@ def _prepare_training_task_inputs_and_output_dir( training_task_inputs["enable_dashboard_access"] = enable_dashboard_access if persistent_resource_id: training_task_inputs["persistent_resource_id"] = persistent_resource_id + if psc_interface_config: + training_task_inputs["psc_interface_config"] = psc_interface_config if ( timeout @@ -2628,9 +2636,9 @@ def _run( training_task_inputs_dict["windowConfig"] = window_config if enable_probabilistic_inference: - training_task_inputs_dict[ - "enableProbabilisticInference" - ] = enable_probabilistic_inference + training_task_inputs_dict["enableProbabilisticInference"] = ( + enable_probabilistic_inference + ) final_export_eval_bq_uri = export_evaluated_data_items_bigquery_destination_uri if final_export_eval_bq_uri and not final_export_eval_bq_uri.startswith( @@ -2645,9 +2653,9 @@ def _run( } if self._additional_experiments: - training_task_inputs_dict[ - "additionalExperiments" - ] = self._additional_experiments + training_task_inputs_dict["additionalExperiments"] = ( + self._additional_experiments + ) model = gca_model.Model( display_name=model_display_name or self._display_name, @@ -3055,6 +3063,9 @@ def run( reservation_affinity_key: Optional[str] = None, reservation_affinity_values: Optional[List[str]] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Runs the custom training job. 
@@ -3433,6 +3444,9 @@ def run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. Returns: The trained Vertex AI model resource or None if the training @@ -3504,6 +3518,7 @@ def run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) def submit( @@ -3564,6 +3579,9 @@ def submit( reservation_affinity_key: Optional[str] = None, reservation_affinity_values: Optional[List[str]] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Submits the custom training job without blocking until completion. @@ -3887,6 +3905,9 @@ def submit( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -3958,6 +3979,7 @@ def submit( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -4007,6 +4029,9 @@ def _run( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -4209,6 +4234,8 @@ def _run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. The PSC interface config for the job. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -4265,6 +4292,7 @@ def _run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) model = self._run_job( @@ -4304,7 +4332,7 @@ def __init__( # TODO(b/223262536): Make display_name parameter fully optional in next major release display_name: str, container_uri: str, - command: Sequence[str] = None, + command: Optional[Sequence[str]] = None, model_serving_container_image_uri: Optional[str] = None, model_serving_container_predict_route: Optional[str] = None, model_serving_container_health_route: Optional[str] = None, @@ -4596,6 +4624,9 @@ def run( reservation_affinity_key: Optional[str] = None, reservation_affinity_values: Optional[List[str]] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Runs the custom training job. @@ -4912,6 +4943,9 @@ def run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -4982,6 +5016,7 @@ def run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) def submit( @@ -5042,6 +5077,9 @@ def submit( reservation_affinity_key: Optional[str] = None, reservation_affinity_values: Optional[List[str]] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Submits the custom training job without blocking until completion. @@ -5358,6 +5396,9 @@ def submit( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -5428,6 +5469,7 @@ def submit( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -5476,6 +5518,9 @@ def _run( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. Args: @@ -5674,6 +5719,9 @@ def _run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. 
+ psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -5724,6 +5772,7 @@ def _run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) model = self._run_job( @@ -6413,9 +6462,9 @@ def _run( } if self._additional_experiments: - training_task_inputs_dict[ - "additionalExperiments" - ] = self._additional_experiments + training_task_inputs_dict["additionalExperiments"] = ( + self._additional_experiments + ) model = gca_model.Model( display_name=model_display_name or self._display_name, @@ -7371,9 +7420,9 @@ def _run( training_task_inputs_dict["baseModelId"] = base_model.name if incremental_train_base_model: - training_task_inputs_dict[ - "uptrainBaseModelId" - ] = incremental_train_base_model.name + training_task_inputs_dict["uptrainBaseModelId"] = ( + incremental_train_base_model.name + ) tunable_parameter_dict: Dict[str, any] = {} @@ -7755,6 +7804,9 @@ def run( reservation_affinity_key: Optional[str] = None, reservation_affinity_values: Optional[List[str]] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Runs the custom training job. @@ -8072,6 +8124,9 @@ def run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -8137,6 +8192,7 @@ def run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -8184,6 +8240,9 @@ def _run( persistent_resource_id: Optional[str] = None, scheduling_strategy: Optional[gca_custom_job_compat.Scheduling.Strategy] = None, max_wait_duration: Optional[int] = None, + psc_interface_config: Optional[ + gca_service_networking.PscInterfaceConfig + ] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -8367,6 +8426,9 @@ def _run( This is the maximum duration that a job will wait for the requested resources to be provisioned in seconds. If set to 0, the job will wait indefinitely. The default is 30 minutes. + psc_interface_config (gca_service_networking.PscInterfaceConfig): + Optional. Configuration for Private Service Connect interface + used for training. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -8417,6 +8479,7 @@ def _run( persistent_resource_id=persistent_resource_id, scheduling_strategy=scheduling_strategy, max_wait_duration=max_wait_duration, + psc_interface_config=psc_interface_config, ) model = self._run_job( diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/initializer.py b/google/cloud/aiplatform/training_utils/cloud_profiler/initializer.py index 7abc815078..eb63792e7b 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/initializer.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/initializer.py @@ -19,7 +19,9 @@ import threading from typing import Optional, Type -from google.cloud.aiplatform.training_utils.cloud_profiler import cloud_profiler_utils +from google.cloud.aiplatform.training_utils.cloud_profiler import ( + cloud_profiler_utils, +) try: from werkzeug import serving @@ -29,7 +31,9 @@ from google.cloud.aiplatform.training_utils import environment_variables from google.cloud.aiplatform.training_utils.cloud_profiler import webserver -from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import base_plugin +from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import ( + base_plugin, +) from google.cloud.aiplatform.training_utils.cloud_profiler.plugins.tensorflow import ( tf_profiler, ) diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py index d778176355..0dc1959a35 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tensorboard_api.py @@ -29,7 +29,9 @@ from google.cloud.aiplatform.tensorboard import upload_tracker from google.cloud.aiplatform.tensorboard import uploader_constants from 
google.cloud.aiplatform.tensorboard import uploader_utils -from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import profile_uploader +from google.cloud.aiplatform.tensorboard.plugins.tf_profiler import ( + profile_uploader, +) from google.cloud.aiplatform.utils import TensorboardClientWithOverride from tensorboard.util import tb_logging diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py index e532db4222..bfbcc65221 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/plugins/tensorflow/tf_profiler.py @@ -131,7 +131,7 @@ def _check_tf() -> bool: return False # Check for the tf profiler plugin - if importlib.util.find_spec("tensorboard_plugin_profile") is None: + if importlib.util.find_spec("tensorboard_plugin_profile") is None: # fmt: skip logger.warning( "Could not import tensorboard_plugin_profile, will not run tf profiling service" ) diff --git a/google/cloud/aiplatform/training_utils/cloud_profiler/webserver.py b/google/cloud/aiplatform/training_utils/cloud_profiler/webserver.py index 3f7706bb34..16e39de22b 100644 --- a/google/cloud/aiplatform/training_utils/cloud_profiler/webserver.py +++ b/google/cloud/aiplatform/training_utils/cloud_profiler/webserver.py @@ -20,7 +20,9 @@ import os from google.cloud.aiplatform.training_utils.cloud_profiler import wsgi_types -from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import base_plugin +from google.cloud.aiplatform.training_utils.cloud_profiler.plugins import ( + base_plugin, +) from typing import List from werkzeug import wrappers, Response diff --git a/google/cloud/aiplatform/utils/__init__.py b/google/cloud/aiplatform/utils/__init__.py index b9ba4a46c1..4abe2b07b1 100644 --- a/google/cloud/aiplatform/utils/__init__.py +++ 
b/google/cloud/aiplatform/utils/__init__.py @@ -18,27 +18,54 @@ import abc import datetime -import pathlib import logging +import pathlib import re -from typing import Any, Callable, Dict, Optional, Type, TypeVar, Tuple, List +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar import uuid -from google.protobuf import timestamp_pb2 - from google.api_core import client_options from google.api_core import gapic_v1 from google.auth import credentials as auth_credentials from google.cloud import storage - from google.cloud.aiplatform import compat -from google.cloud.aiplatform.constants import base as constants from google.cloud.aiplatform import initializer - +from google.cloud.aiplatform.compat.services import ( + dataset_service_client_v1, + deployment_resource_pool_service_client_v1, + endpoint_service_client_v1, + feature_online_store_admin_service_client_v1, + feature_online_store_service_client_v1, + feature_registry_service_client_v1, + featurestore_online_serving_service_client_v1, + featurestore_service_client_v1, + gen_ai_cache_service_client_v1, + index_endpoint_service_client_v1, + index_service_client_v1, + job_service_client_v1, + metadata_service_client_v1, + model_garden_service_client_v1, + model_service_client_v1, + persistent_resource_service_client_v1, + pipeline_service_client_v1, + prediction_service_async_client_v1, + prediction_service_client_v1, + reasoning_engine_execution_async_client_v1, + reasoning_engine_execution_service_client_v1, + reasoning_engine_service_client_v1, + schedule_service_client_v1, + tensorboard_service_client_v1, + vertex_rag_data_service_async_client_v1, + vertex_rag_data_service_client_v1, + vertex_rag_service_async_client_v1, + vertex_rag_service_client_v1, + vizier_service_client_v1, +) from google.cloud.aiplatform.compat.services import ( dataset_service_client_v1beta1, deployment_resource_pool_service_client_v1beta1, endpoint_service_client_v1beta1, + example_store_service_client_v1beta1, 
extension_execution_service_client_v1beta1, extension_registry_service_client_v1beta1, feature_online_store_admin_service_client_v1beta1, @@ -47,58 +74,35 @@ featurestore_online_serving_service_client_v1beta1, featurestore_service_client_v1beta1, gen_ai_cache_service_client_v1beta1, - index_service_client_v1beta1, index_endpoint_service_client_v1beta1, + index_service_client_v1beta1, job_service_client_v1beta1, match_service_client_v1beta1, metadata_service_client_v1beta1, - model_service_client_v1beta1, + model_garden_service_client_v1beta1, model_monitoring_service_client_v1beta1, + model_service_client_v1beta1, + persistent_resource_service_client_v1beta1, pipeline_service_client_v1beta1, - prediction_service_client_v1beta1, prediction_service_async_client_v1beta1, + prediction_service_client_v1beta1, + reasoning_engine_execution_service_client_v1beta1, + reasoning_engine_service_client_v1beta1, schedule_service_client_v1beta1, tensorboard_service_client_v1beta1, - vizier_service_client_v1beta1, - model_garden_service_client_v1beta1, - persistent_resource_service_client_v1beta1, - reasoning_engine_service_client_v1beta1, - reasoning_engine_execution_service_client_v1beta1, vertex_rag_data_service_async_client_v1beta1, vertex_rag_data_service_client_v1beta1, + vertex_rag_service_async_client_v1beta1, vertex_rag_service_client_v1beta1, + vizier_service_client_v1beta1, ) -from google.cloud.aiplatform.compat.services import ( - dataset_service_client_v1, - deployment_resource_pool_service_client_v1, - endpoint_service_client_v1, - feature_online_store_admin_service_client_v1, - feature_online_store_service_client_v1, - feature_registry_service_client_v1, - featurestore_online_serving_service_client_v1, - featurestore_service_client_v1, - index_service_client_v1, - index_endpoint_service_client_v1, - job_service_client_v1, - metadata_service_client_v1, - model_garden_service_client_v1, - model_service_client_v1, - pipeline_service_client_v1, - 
prediction_service_client_v1, - prediction_service_async_client_v1, - schedule_service_client_v1, - tensorboard_service_client_v1, - vizier_service_client_v1, - persistent_resource_service_client_v1, - vertex_rag_data_service_async_client_v1, - vertex_rag_data_service_client_v1, - vertex_rag_service_client_v1, -) - from google.cloud.aiplatform.compat.types import ( accelerator_type as gca_accelerator_type, reservation_affinity_v1 as gca_reservation_affinity_v1, ) +from google.cloud.aiplatform.constants import base as constants + +from google.protobuf import timestamp_pb2 VertexAiServiceClient = TypeVar( "VertexAiServiceClient", @@ -556,15 +560,17 @@ def __init__( kwargs["transport"] = transport self._clients = { - version: self.WrappedClient( - client_class=client_class, - client_options=client_options, - client_info=client_info, - credentials=credentials, - transport=transport, + version: ( + self.WrappedClient( + client_class=client_class, + client_options=client_options, + client_info=client_info, + credentials=credentials, + transport=transport, + ) + if self._is_temporary + else client_class(**kwargs) ) - if self._is_temporary - else client_class(**kwargs) for version, client_class in self._version_map } @@ -805,8 +811,7 @@ class GenAiCacheServiceClientWithOverride(ClientWithOverride): _version_map = ( ( compat.V1, - # TODO(b/342585299): Temporary code. Switch to v1 once v1 is available. 
- gen_ai_cache_service_client_v1beta1.GenAiCacheServiceClient, + gen_ai_cache_service_client_v1.GenAiCacheServiceClient, ), ( compat.V1BETA1, @@ -949,6 +954,17 @@ class PersistentResourceClientWithOverride(ClientWithOverride): ) +class ExampleStoreClientWithOverride(ClientWithOverride): + _is_temporary = True + _default_version = compat.V1BETA1 + _version_map = ( + ( + compat.V1BETA1, + example_store_service_client_v1beta1.ExampleStoreServiceClient, + ), + ) + + class ReasoningEngineClientWithOverride(ClientWithOverride): _is_temporary = True _default_version = compat.V1BETA1 @@ -971,6 +987,39 @@ class ReasoningEngineExecutionClientWithOverride(ClientWithOverride): ) +class AgentEngineClientWithOverride(ClientWithOverride): + _is_temporary = True + _default_version = compat.V1 + _version_map = ( + ( + compat.V1, + reasoning_engine_service_client_v1.ReasoningEngineServiceClient, + ), + ) + + +class AgentEngineExecutionClientWithOverride(ClientWithOverride): + _is_temporary = True + _default_version = compat.V1 + _version_map = ( + ( + compat.V1, + reasoning_engine_execution_service_client_v1.ReasoningEngineExecutionServiceClient, + ), + ) + + +class AgentEngineExecutionAsyncClientWithOverride(ClientWithOverride): + _is_temporary = True + _default_version = compat.V1 + _version_map = ( + ( + compat.V1, + reasoning_engine_execution_async_client_v1.ReasoningEngineExecutionServiceAsyncClient, + ), + ) + + class VertexRagDataClientWithOverride(ClientWithOverride): _is_temporary = True _default_version = compat.DEFAULT_VERSION @@ -1003,7 +1052,25 @@ class VertexRagClientWithOverride(ClientWithOverride): _default_version = compat.DEFAULT_VERSION _version_map = ( (compat.V1, vertex_rag_service_client_v1.VertexRagServiceClient), - (compat.V1BETA1, vertex_rag_service_client_v1beta1.VertexRagServiceClient), + ( + compat.V1BETA1, + vertex_rag_service_client_v1beta1.VertexRagServiceClient, + ), + ) + + +class VertexRagAsyncClientWithOverride(ClientWithOverride): + _is_temporary 
= True + _default_version = compat.DEFAULT_VERSION + _version_map = ( + ( + compat.V1, + vertex_rag_service_async_client_v1.VertexRagServiceAsyncClient, + ), + ( + compat.V1BETA1, + vertex_rag_service_async_client_v1beta1.VertexRagServiceAsyncClient, + ), ) @@ -1026,6 +1093,8 @@ class VertexRagClientWithOverride(ClientWithOverride): PersistentResourceClientWithOverride, ReasoningEngineClientWithOverride, ReasoningEngineExecutionClientWithOverride, + AgentEngineClientWithOverride, + AgentEngineExecutionClientWithOverride, ModelMonitoringClientWithOverride, ) diff --git a/google/cloud/aiplatform/utils/_ipython_utils.py b/google/cloud/aiplatform/utils/_ipython_utils.py index bde399b313..afa6f3f3d7 100644 --- a/google/cloud/aiplatform/utils/_ipython_utils.py +++ b/google/cloud/aiplatform/utils/_ipython_utils.py @@ -17,6 +17,7 @@ import sys import typing +import urllib from uuid import uuid4 from typing import Optional @@ -145,7 +146,7 @@ def display_link(text: str, url: str, icon: Optional[str] = "open_in_new") -> No """ - from IPython.core.display import display + from IPython.display import display from IPython.display import HTML display(HTML(html)) @@ -266,3 +267,18 @@ def display_browse_prebuilt_metrics_button() -> None: "https://cloud.google.com/vertex-ai/generative-ai/docs/models/metrics-templates" ) display_link("Browse pre-built metrics", uri, "list") + + +def display_gen_ai_evaluation_results_button( + gcs_file_path: Optional[str] = None, +) -> None: + """Function to generate a link bound to the Gen AI evaluation run.""" + if not is_ipython_available(): + return + + uri = "https://cloud.google.com/vertex-ai/generative-ai/docs/models/view-evaluation" + if gcs_file_path is not None: + gcs_file_path = urllib.parse.quote(gcs_file_path) + uri = f"https://console.cloud.google.com/storage/browser/_details/{gcs_file_path};colab_enterprise=gen_ai_evaluation" + + display_link("View evaluation results", uri, "bar_chart") diff --git 
a/google/cloud/aiplatform/utils/featurestore_utils.py b/google/cloud/aiplatform/utils/featurestore_utils.py index f40cff1f26..7cc190752e 100644 --- a/google/cloud/aiplatform/utils/featurestore_utils.py +++ b/google/cloud/aiplatform/utils/featurestore_utils.py @@ -18,7 +18,9 @@ import re from typing import Dict, NamedTuple, Optional -from google.cloud.aiplatform.compat.services import featurestore_service_client +from google.cloud.aiplatform.compat.services import ( + featurestore_service_client, +) from google.cloud.aiplatform.compat.types import ( feature as gca_feature, featurestore_service as gca_featurestore_service, diff --git a/google/cloud/aiplatform/utils/gcs_utils.py b/google/cloud/aiplatform/utils/gcs_utils.py index 144e5c415d..5bebd9ee01 100644 --- a/google/cloud/aiplatform/utils/gcs_utils.py +++ b/google/cloud/aiplatform/utils/gcs_utils.py @@ -17,14 +17,20 @@ import datetime import glob +import uuid + +# Version detection and compatibility layer for google-cloud-storage v2/v3 +from importlib.metadata import version as get_version import logging import os import pathlib import tempfile from typing import Optional, TYPE_CHECKING +import warnings from google.auth import credentials as auth_credentials from google.cloud import storage +from packaging.version import Version from google.cloud.aiplatform import initializer from google.cloud.aiplatform.utils import resource_manager_utils @@ -35,6 +41,97 @@ _logger = logging.getLogger(__name__) +# Detect google-cloud-storage version once at module load +try: + _GCS_VERSION = Version(get_version("google-cloud-storage")) +except Exception: + # Fallback if version detection fails (should not happen in normal use) + _GCS_VERSION = Version("3.0.0") + +_USE_FROM_URI = _GCS_VERSION >= Version("3.0.0") + +# Warn users on v2 about upcoming deprecation +if _GCS_VERSION < Version("3.0.0"): + warnings.warn( + "Support for google-cloud-storage < 3.0.0 will be removed in a future" + " version of google-cloud-aiplatform. 
Please upgrade to" + " google-cloud-storage >= 3.0.0.", + FutureWarning, + stacklevel=2, + ) + +_DEFAULT_STAGING_BUCKET_SALT = str(uuid.uuid4()) + + +def _verify_bucket_ownership( + bucket: storage.Bucket, + expected_project: str, + client: storage.Client, +) -> bool: + """Verifies that a GCS bucket belongs to the expected project. + + This check mitigates bucket squatting attacks where an attacker creates a + bucket with a predictable name in their own project before the victim does. + + Args: + bucket: The GCS bucket to verify. + expected_project: The project ID that should own the bucket. + client: Storage client instance. + + Returns: + True if the bucket belongs to the expected project, False otherwise. + """ + try: + bucket.reload(client=client) + bucket_project_number = str(bucket.project_number) + expected_project_number = str( + resource_manager_utils.get_project_number(expected_project) + ) + return bucket_project_number == expected_project_number + except Exception: + return False + + +def blob_from_uri(uri: str, client: storage.Client) -> storage.Blob: + """Create a Blob from a GCS URI, compatible with v2 and v3. + + This function provides compatibility across google-cloud-storage versions: + - v3.x: Uses Blob.from_uri() + - v2.x: Uses Blob.from_string() (deprecated in v3) + + Args: + uri: GCS URI (e.g., 'gs://bucket/path/to/blob') + client: Storage client instance + + Returns: + storage.Blob: Blob instance + """ + if _USE_FROM_URI: + return storage.Blob.from_uri(uri, client=client) + else: + return storage.Blob.from_string(uri, client=client) + + +def bucket_from_uri(uri: str, client: storage.Client) -> storage.Bucket: + """Create a Bucket from a GCS URI, compatible with v2 and v3. 
+ + This function provides compatibility across google-cloud-storage versions: + - v3.x: Uses Bucket.from_uri() + - v2.x: Uses Bucket.from_string() (deprecated in v3) + + Args: + uri: GCS bucket URI (e.g., 'gs://bucket-name') + client: Storage client instance + + Returns: + storage.Bucket: Bucket instance + """ + if _USE_FROM_URI: + return storage.Bucket.from_uri(uri, client=client) + else: + return storage.Bucket.from_string(uri, client=client) + + def upload_to_gcs( source_path: str, destination_uri: str, @@ -79,18 +176,18 @@ def upload_to_gcs( destination_file_uri = ( destination_uri.rstrip("/") + "/" + source_file_relative_posix_path ) - _logger.debug(f'Uploading "{source_file_path}" to "{destination_file_uri}"') - destination_blob = storage.Blob.from_string( + _logger.debug( + 'Uploading "%s" to "%s"', source_file_path, destination_file_uri + ) + destination_blob = blob_from_uri( destination_file_uri, client=storage_client ) destination_blob.upload_from_filename(filename=source_file_path) else: source_file_path = source_path destination_file_uri = destination_uri - _logger.debug(f'Uploading "{source_file_path}" to "{destination_file_uri}"') - destination_blob = storage.Blob.from_string( - destination_file_uri, client=storage_client - ) + _logger.debug('Uploading "%s" to "%s"', source_file_path, destination_file_uri) + destination_blob = blob_from_uri(destination_file_uri, client=storage_client) destination_blob.upload_from_filename(filename=source_file_path) @@ -136,9 +233,14 @@ def stage_local_data_in_gcs( # Currently we only do this when staging_gcs_dir is not specified. # The buckets that we create are regional. # This prevents errors when some service required regional bucket. - # E.g. "FailedPrecondition: 400 The Cloud Storage bucket of `gs://...` is in location `us`. It must be in the same regional location as the service location `us-central1`." - # We are making the bucket name region-specific since the bucket is regional. 
- staging_bucket_name = project + "-vertex-staging-" + location + # E.g. "FailedPrecondition: 400 The Cloud Storage bucket of `gs://...` + # is in location `us`. It must be in the same regional location as the + # service location `us-central1`." + # We are making the bucket name region-specific since the bucket is + # regional. + staging_bucket_name = ( + project + "-vertex-staging-" + location + "-" + _DEFAULT_STAGING_BUCKET_SALT + )[:63] client = storage.Client(project=project, credentials=credentials) staging_bucket = storage.Bucket(client=client, name=staging_bucket_name) if not staging_bucket.exists(): @@ -148,6 +250,17 @@ def stage_local_data_in_gcs( project=project, location=location, ) + else: + # Verify bucket ownership to prevent bucket squatting attacks. + # See b/469987320 for details. + if not _verify_bucket_ownership(staging_bucket, project, client): + raise ValueError( + f'Staging bucket "{staging_bucket_name}" exists but does ' + f'not belong to project "{project}". This may indicate a ' + f"bucket squatting attack. Please provide an explicit " + f"staging_bucket parameter or configure one via " + f"aiplatform.init(staging_bucket='gs://your-bucket')." 
+ ) staging_gcs_dir = "gs://" + staging_bucket_name timestamp = datetime.datetime.now().isoformat(sep="-", timespec="milliseconds") @@ -234,7 +347,7 @@ def create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist( credentials=credentials, ) - pipelines_bucket = storage.Bucket.from_string( + pipelines_bucket = bucket_from_uri( uri=output_artifacts_gcs_dir, client=storage_client, ) @@ -294,9 +407,9 @@ def download_file_from_gcs( credentials = credentials or initializer.global_config.credentials storage_client = storage.Client(project=project, credentials=credentials) - source_blob = storage.Blob.from_string(source_file_uri, client=storage_client) + source_blob = blob_from_uri(source_file_uri, client=storage_client) - _logger.debug(f'Downloading "{source_file_uri}" to "{destination_file_path}"') + _logger.debug('Downloading "%s" to "%s"', source_file_uri, destination_file_path) source_blob.download_to_filename(filename=destination_file_path) @@ -354,13 +467,10 @@ def _upload_pandas_df_to_gcs( """Uploads the provided Pandas DataFrame to a GCS bucket. Args: - df (pandas.DataFrame): - Required. The Pandas DataFrame to upload. - upload_gcs_path (str): - Required. The GCS path to upload the data file. - file_format (str): - Required. The format to export the DataFrame to. Currently - only JSONL is supported. + df (pandas.DataFrame): Required. The Pandas DataFrame to upload. + upload_gcs_path (str): Required. The GCS path to upload the data file. + file_format (str): Required. The format to export the DataFrame to. + Currently only JSONL is supported. Raises: ValueError: When a file format other than JSONL is provided. 
@@ -378,9 +488,9 @@ def _upload_pandas_df_to_gcs( project=initializer.global_config.project, credentials=initializer.global_config.credentials, ) - storage.Blob.from_string( - uri=upload_gcs_path, client=storage_client - ).upload_from_filename(filename=local_dataset_path) + blob_from_uri(uri=upload_gcs_path, client=storage_client).upload_from_filename( + filename=local_dataset_path + ) def validate_gcs_path(gcs_path: str) -> None: diff --git a/google/cloud/aiplatform/utils/pipeline_utils.py b/google/cloud/aiplatform/utils/pipeline_utils.py index bc86b73237..88323efe76 100644 --- a/google/cloud/aiplatform/utils/pipeline_utils.py +++ b/google/cloud/aiplatform/utils/pipeline_utils.py @@ -261,7 +261,7 @@ def _get_vertex_value( def _parse_runtime_parameters( - runtime_config_spec: Mapping[str, Any] + runtime_config_spec: Mapping[str, Any], ) -> Optional[Dict[str, Any]]: """Extracts runtime parameters from runtime config json spec. diff --git a/google/cloud/aiplatform/utils/prediction_utils.py b/google/cloud/aiplatform/utils/prediction_utils.py index c6a15d712c..fb65c3c2ce 100644 --- a/google/cloud/aiplatform/utils/prediction_utils.py +++ b/google/cloud/aiplatform/utils/prediction_utils.py @@ -26,6 +26,10 @@ from google.cloud import storage from google.cloud.aiplatform.constants import prediction from google.cloud.aiplatform.utils import path_utils +from google.cloud.aiplatform.compat.types import ( + machine_resources_v1beta1 as gca_machine_resources_compat, +) +from google.protobuf import duration_pb2 _logger = logging.getLogger(__name__) @@ -151,3 +155,30 @@ def download_model_artifacts(artifact_uri: str) -> None: else: # Copy files to the current working directory. 
shutil.copytree(artifact_uri, ".", dirs_exist_ok=True) + + +def add_flex_start_to_dedicated_resources( + dedicated_resources: gca_machine_resources_compat.DedicatedResources, + max_runtime_duration: Optional[int] = None, +) -> None: + """Adds FlexStart configuration to DedicatedResources if max_runtime_duration is provided. + + Args: + dedicated_resources (gca_machine_resources_compat.DedicatedResources): + Required. The DedicatedResources object to modify. + max_runtime_duration (int): + Optional. The maximum runtime duration in seconds. If provided and + greater than 0, a FlexStart configuration will be added. + """ + if max_runtime_duration is not None and max_runtime_duration > 0: + dedicated_resources.flex_start = gca_machine_resources_compat.FlexStart( + max_runtime_duration=duration_pb2.Duration(seconds=max_runtime_duration) + ) + + +def is_extension_allowed( + filename: str, allowed_extensions: Optional[list[str]] +) -> bool: + if allowed_extensions is None: + return True + return any(filename.endswith(ext) for ext in allowed_extensions) diff --git a/google/cloud/aiplatform/utils/yaml_utils.py b/google/cloud/aiplatform/utils/yaml_utils.py index 4c660957a1..fbda7d1080 100644 --- a/google/cloud/aiplatform/utils/yaml_utils.py +++ b/google/cloud/aiplatform/utils/yaml_utils.py @@ -22,6 +22,7 @@ from google.auth import transport from google.cloud import storage from google.cloud.aiplatform.constants import pipeline as pipeline_constants +from google.cloud.aiplatform.utils.gcs_utils import blob_from_uri # Pattern for an Artifact Registry URL. 
_VALID_AR_URL = pipeline_constants._VALID_AR_URL @@ -98,7 +99,7 @@ def _load_yaml_from_gs_uri( """ yaml = _maybe_import_yaml() storage_client = storage.Client(project=project, credentials=credentials) - blob = storage.Blob.from_string(uri, storage_client) + blob = blob_from_uri(uri, storage_client) return yaml.safe_load(blob.download_as_bytes()) diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py index a68981e717..3fc6a1f76b 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py index f66d2ef6a8..241b535dd6 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.image_classification import ImageClassificationPredictionInstance from .types.image_object_detection import ImageObjectDetectionPredictionInstance @@ -30,6 +40,100 @@ from .types.video_classification import VideoClassificationPredictionInstance from .types.video_object_tracking import VideoObjectTrackingPredictionInstance +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1.schema.predict.instance_v1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1.schema.predict.instance_v1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. 
+ try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.aiplatform.v1.schema.predict.instance_v1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. 
+ break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. 
To continue receiving " + + "updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "ImageClassificationPredictionInstance", "ImageObjectDetectionPredictionInstance", diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py index fb0017f80e..0aea2a22c7 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py index 5f9b23110c..1a1a1e3f2a 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py index d720224bb3..942bf18280 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py index 1b53d8eefa..1d9021b823 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py index 68a2563f48..01d5160303 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py index 7138ceb766..ed2ca77094 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py index b96cbe0ee4..39c6ddac18 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py index 02db1455a4..bdca6c28a2 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py index 0111c6e158..6d2643abaf 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py index 2d477e1f6d..0c891d3e62 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params/__init__.py index 5d3f1f1e8e..2f9f23dfd8 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py index 8195bfa390..afd7096894 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.image_classification import ImageClassificationPredictionParams from .types.image_object_detection import ImageObjectDetectionPredictionParams @@ -27,6 +37,100 @@ from .types.video_classification import VideoClassificationPredictionParams from .types.video_object_tracking import VideoObjectTrackingPredictionParams +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1.schema.predict.params_v1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1.schema.predict.params_v1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.aiplatform.v1.schema.predict.params_v1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. 
Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. + break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." 
+ + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. To continue receiving " + + f"updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "ImageClassificationPredictionParams", "ImageObjectDetectionPredictionParams", diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py index f2ccc83824..ecba43ffd1 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py index 63fa2e09a6..5d0ce9a0fb 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py index 0393e3d609..80bbe1a289 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py index 39603e96c8..1ff783d987 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py index bf0eefe6a9..85b978810f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py index aad1e5a72d..ad67a0ffdd 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py index f59572b63e..bb869f5a47 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py index 1cad817597..53b3d469c9 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py index 1257a0f73a..bcbdee2924 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.classification import ClassificationPredictionResult from .types.image_object_detection import ImageObjectDetectionPredictionResult @@ -31,6 +41,100 @@ from .types.video_classification import VideoClassificationPredictionResult from .types.video_object_tracking import VideoObjectTrackingPredictionResult +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1.schema.predict.prediction_v1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1.schema.predict.prediction_v1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.aiplatform.v1.schema.predict.prediction_v1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. 
Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. + break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." 
+ + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. To continue receiving " + + f"updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "ClassificationPredictionResult", "ImageObjectDetectionPredictionResult", diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py index f9cad3485b..27952c6b7a 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py index 69181f13d8..33a9affa7f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py index ecfb179007..30a57c1c07 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py index 0cd297bc94..8a78d9cd6f 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py index 3265731522..58ba84e71b 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py index 514620b5de..883059a0d3 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py index b9f2758530..b17d648477 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py index 93a735ecb4..cc32d8a50e 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py index 7456e6f9e4..6bd702b541 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py index 846d1bdcab..263b8858d7 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py index 3d6596934c..ff14cbd3a8 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py index 782c72ea41..cac62d73f0 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py index 215f0452f1..941066366b 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.automl_image_classification import AutoMlImageClassification from .types.automl_image_classification import AutoMlImageClassificationInputs @@ -46,6 +56,100 @@ from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1.schema.trainingjob.definition_v1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1.schema.trainingjob.definition_v1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.aiplatform.v1.schema.trainingjob.definition_v1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. 
Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. + break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." 
+ + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. To continue receiving " + + "updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "AutoMlImageClassification", "AutoMlImageClassificationInputs", diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py index f45889784c..95b68c6caa 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py index 77ff63234a..e2ff695597 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -131,6 +131,7 @@ class ModelType(proto.Enum): but should also have a higher prediction quality than other mobile models. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_TF_LOW_LATENCY_1 = 2 @@ -189,6 +190,7 @@ class SuccessfulStopReason(proto.Enum): increase its quality, since it already has converged. """ + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 BUDGET_REACHED = 1 MODEL_CONVERGED = 2 diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py index 36fbafce11..7feb056ea3 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -125,6 +125,7 @@ class ModelType(proto.Enum): also have a higher prediction quality than other mobile models. 
""" + MODEL_TYPE_UNSPECIFIED = 0 CLOUD_HIGH_ACCURACY_1 = 1 CLOUD_LOW_LATENCY_1 = 2 @@ -176,6 +177,7 @@ class SuccessfulStopReason(proto.Enum): increase its quality, since it already has converged. """ + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 BUDGET_REACHED = 1 MODEL_CONVERGED = 2 diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py index 316c17fe4d..3b9f06290c 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -105,6 +105,7 @@ class ModelType(proto.Enum): Expected to have low latency, but may have lower prediction quality than other mobile models. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD_HIGH_ACCURACY_1 = 1 CLOUD_LOW_ACCURACY_1 = 2 @@ -154,6 +155,7 @@ class SuccessfulStopReason(proto.Enum): increase its quality, since it already has converged. 
""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 BUDGET_REACHED = 1 MODEL_CONVERGED = 2 diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py index 08ffa55b59..72a0bb4dbf 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -235,15 +235,15 @@ class AutoTransformation(proto.Message): class NumericTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The value converted to float32. - - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - A boolean value that indicates whether the value is valid. + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal to + 0. Otherwise, this transformation is not applied and the value is + considered a missing value. + - A boolean value that indicates whether the value is valid. 
Attributes: column_name (str): @@ -268,13 +268,13 @@ class NumericTransformation(proto.Message): class CategoricalTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category gets + its own special lookup index and resulting embedding. Attributes: column_name (str): @@ -289,13 +289,13 @@ class CategoricalTransformation(proto.Message): class TimestampTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the - - timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside of + a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. 
Attributes: column_name (str): @@ -304,15 +304,15 @@ class TimestampTransformation(proto.Message): The format in which that time field is expressed. The time_format must either be one of: - - ``unix-seconds`` - - ``unix-milliseconds`` - - ``unix-microseconds`` - - ``unix-nanoseconds`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = - ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` + (e.g. 1985-04-12T23:20:50.52Z) invalid_values_allowed (bool): If invalid values is allowed, the training pipeline will create a boolean feature that @@ -337,15 +337,14 @@ class TimestampTransformation(proto.Message): class TextTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - Tokenize text to words. Convert each words to a dictionary lookup - index and generate an embedding for each index. Combine the - embedding of all elements into a single embedding using the mean. - - Tokenization is based on unicode script boundaries. - - Missing values get their own lookup index and resulting - embedding. - - Stop-words receive no special treatment and are not removed. + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. 
Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting embedding. + - Stop-words receive no special treatment and are not removed. Attributes: column_name (str): @@ -361,9 +360,9 @@ class NumericArrayTransformation(proto.Message): r"""Treats the column as numerical array and performs following transformation functions. - - All transformations for Numerical types applied to the average of - the all elements. - - The average of empty arrays is treated as zero. + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. Attributes: column_name (str): @@ -389,11 +388,11 @@ class CategoricalArrayTransformation(proto.Message): r"""Treats the column as categorical array and performs following transformation functions. - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. Attributes: column_name (str): @@ -409,10 +408,10 @@ class TextArrayTransformation(proto.Message): r"""Treats the column as text array and performs following transformation functions. - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. 
+ - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as a + single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. Attributes: column_name (str): @@ -460,13 +459,17 @@ class TextArrayTransformation(proto.Message): oneof="transformation_detail", message="AutoMlTablesInputs.Transformation.TextTransformation", ) - repeated_numeric: "AutoMlTablesInputs.Transformation.NumericArrayTransformation" = proto.Field( + repeated_numeric: ( + "AutoMlTablesInputs.Transformation.NumericArrayTransformation" + ) = proto.Field( proto.MESSAGE, number=6, oneof="transformation_detail", message="AutoMlTablesInputs.Transformation.NumericArrayTransformation", ) - repeated_categorical: "AutoMlTablesInputs.Transformation.CategoricalArrayTransformation" = proto.Field( + repeated_categorical: ( + "AutoMlTablesInputs.Transformation.CategoricalArrayTransformation" + ) = proto.Field( proto.MESSAGE, number=7, oneof="transformation_detail", @@ -520,7 +523,9 @@ class TextArrayTransformation(proto.Message): proto.STRING, number=9, ) - export_evaluated_data_items_config: gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig = proto.Field( + export_evaluated_data_items_config: ( + gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig + ) = proto.Field( proto.MESSAGE, number=10, message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py index 867ac1ac52..26974de391 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- 
-# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py index aa347ee015..1b5a015bca 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py index 8f5f3684be..754c70ab76 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py index b5de8540f5..c305cf3f6b 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -81,6 +81,7 @@ class ModelType(proto.Enum): TensorFlow Lite model and used on a Coral device afterwards. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py index f6b025b15a..1484267000 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -75,6 +75,7 @@ class ModelType(proto.Enum): ModelService.ExportModel) to a Jetson device afterwards. 
""" + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py index 8b3e67ddb8..704d6f4a94 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -88,6 +88,7 @@ class ModelType(proto.Enum): ModelService.ExportModel) and used on an NVIDIA Jetson device. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py index e1b5a6f196..f0de3bb9be 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -39,7 +39,7 @@ class ExportEvaluatedDataItemsConfig(proto.Message): If not specified, then results are exported to the following auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples + :export_evaluated_examples\_\_.evaluated_examples override_existing_table (bool): If true and an export destination is specified, then the contents of the destination diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py index 599ba32ca7..5cf5db1af4 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py index 33c0c07b51..5abed234cf 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.image_classification import ImageClassificationPredictionInstance from .types.image_object_detection import ImageObjectDetectionPredictionInstance @@ -30,6 +40,102 @@ from .types.video_classification import VideoClassificationPredictionInstance from .types.video_object_tracking import VideoObjectTrackingPredictionInstance +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. 
+ try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = ( + "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1" + ) + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. 
+ break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. 
To continue receiving " + + f"updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "ImageClassificationPredictionInstance", "ImageObjectDetectionPredictionInstance", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py index fb0017f80e..0aea2a22c7 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py index 54d6cf2404..61cae14768 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py index d00aaf9ea1..5706ce93fb 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py index 45e8c93b1c..94d11503e2 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py index a2c6b3370b..9ce199f7be 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py index 4008dc54b7..d2b885078b 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py index 79b0b0e7d5..7f356a6c57 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py index f07216d0c6..bceb593d83 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py index 0e226320d5..6e7b855ba6 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py index d5ad4d13be..016ec4967c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py index b366ed4a4f..9e82654fc5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py index 5043da6739..b13027e72d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.image_classification import ImageClassificationPredictionParams from .types.image_object_detection import ImageObjectDetectionPredictionParams @@ -27,6 +37,100 @@ from .types.video_classification import VideoClassificationPredictionParams from .types.video_object_tracking import VideoObjectTrackingPredictionParams +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. 
Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. + break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." 
+ + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. To continue receiving " + + f"updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "ImageClassificationPredictionParams", "ImageObjectDetectionPredictionParams", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py index f2ccc83824..ecba43ffd1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py index 88eae8fd61..214c2951ac 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py index 8a860e75ca..020ebe77a1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py index 9553e7da6c..e1c5fae209 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py index 1bf3830efe..d963f1a215 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py index eb69cf6559..af27e55208 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py index 1ca86de28a..0f8648a87b 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py index 5b99d46ba2..70aff7bc4d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py index bb94eff50e..b4d113904a 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.classification import ClassificationPredictionResult from .types.image_object_detection import ImageObjectDetectionPredictionResult @@ -32,6 +42,102 @@ from .types.video_classification import VideoClassificationPredictionResult from .types.video_object_tracking import VideoObjectTrackingPredictionResult +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. 
+ try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = ( + "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1" + ) + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. " + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. 
+ break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. 
To continue receiving " + + f"updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( "ClassificationPredictionResult", "ImageObjectDetectionPredictionResult", diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py index a627edaf0d..49d33147bd 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py index e1d94a500e..8c5a82e3c1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py index 79ae8e46ac..78de47da0f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,7 +19,7 @@ import proto # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py index da40d8f9f4..e1069bc0c1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py index 0c3add6915..4a0df995b1 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py index 080c9caeae..46ce8dd40c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py index 23f5416841..f525dd6386 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py index 50524b3d5d..85ca3fa85f 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py index 5e408cbace..d9a7799c53 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py index d3c64bca03..e1245a4f71 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py index 6312a58b54..451e2d993c 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py index 9582a60457..c31fff397e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ import proto # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.wrappers_pb2 as wrappers_pb2 # type: ignore __protobuf__ = proto.module( diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py index 2f7eab3daa..20ff5f96c8 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py index 6d67eaf5d8..08729de15e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,8 +17,18 @@ gapic_version as package_version, ) +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + from .types.automl_image_classification import AutoMlImageClassification from .types.automl_image_classification import AutoMlImageClassificationInputs @@ -49,6 +59,102 @@ from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = ( + "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1" + ) + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. 
" + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. 
+ break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. 
To continue receiving " + f"updates for {_package_label}, ensure you are " + "using a supported version of Python; see " + "https://devguide.python.org/versions/" ) __all__ = ( "AutoMlForecasting", "AutoMlForecastingInputs", diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py index c3f9d35cc5..04238ad005 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py index 1d02c79c32..aa30bf1cab 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -131,6 +131,7 @@ class ModelType(proto.Enum): but should also have a higher prediction quality than other mobile models. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_TF_LOW_LATENCY_1 = 2 @@ -189,6 +190,7 @@ class SuccessfulStopReason(proto.Enum): increase its quality, since it already has converged. """ + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 BUDGET_REACHED = 1 MODEL_CONVERGED = 2 diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py index 0372f88dbc..6c363e9a0a 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -125,6 +125,7 @@ class ModelType(proto.Enum): also have a higher prediction quality than other mobile models. 
""" + MODEL_TYPE_UNSPECIFIED = 0 CLOUD_HIGH_ACCURACY_1 = 1 CLOUD_LOW_LATENCY_1 = 2 @@ -176,6 +177,7 @@ class SuccessfulStopReason(proto.Enum): increase its quality, since it already has converged. """ + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 BUDGET_REACHED = 1 MODEL_CONVERGED = 2 diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py index 8ac459d088..0a190567aa 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -105,6 +105,7 @@ class ModelType(proto.Enum): Expected to have low latency, but may have lower prediction quality than other mobile models. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD_HIGH_ACCURACY_1 = 1 CLOUD_LOW_ACCURACY_1 = 2 @@ -154,6 +155,7 @@ class SuccessfulStopReason(proto.Enum): increase its quality, since it already has converged. 
""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 BUDGET_REACHED = 1 MODEL_CONVERGED = 2 diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py index 69fb5c82dd..5446fd2f0d 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -235,15 +235,15 @@ class AutoTransformation(proto.Message): class NumericTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The value converted to float32. - - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - A boolean value that indicates whether the value is valid. + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal to + 0. Otherwise, this transformation is not applied and the value is + considered a missing value. + - A boolean value that indicates whether the value is valid. 
Attributes: column_name (str): @@ -268,13 +268,13 @@ class NumericTransformation(proto.Message): class CategoricalTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category gets + its own special lookup index and resulting embedding. Attributes: column_name (str): @@ -289,13 +289,13 @@ class CategoricalTransformation(proto.Message): class TimestampTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the - - timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day,and weekday. Treat each value from + the + - timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside of + a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. 
Attributes: column_name (str): @@ -304,15 +304,15 @@ class TimestampTransformation(proto.Message): The format in which that time field is expressed. The time_format must either be one of: - - ``unix-seconds`` - - ``unix-milliseconds`` - - ``unix-microseconds`` - - ``unix-nanoseconds`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = - ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` + (e.g. 1985-04-12T23:20:50.52Z) invalid_values_allowed (bool): If invalid values is allowed, the training pipeline will create a boolean feature that @@ -337,15 +337,14 @@ class TimestampTransformation(proto.Message): class TextTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - Tokenize text to words. Convert each words to a dictionary lookup - index and generate an embedding for each index. Combine the - embedding of all elements into a single embedding using the mean. - - Tokenization is based on unicode script boundaries. - - Missing values get their own lookup index and resulting - embedding. - - Stop-words receive no special treatment and are not removed. + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each words to a dictionary lookup + index and generate an embedding for each index. 
Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting embedding. + - Stop-words receive no special treatment and are not removed. Attributes: column_name (str): @@ -361,9 +360,9 @@ class NumericArrayTransformation(proto.Message): r"""Treats the column as numerical array and performs following transformation functions. - - All transformations for Numerical types applied to the average of - the all elements. - - The average of empty arrays is treated as zero. + - All transformations for Numerical types applied to the average of + the all elements. + - The average of empty arrays is treated as zero. Attributes: column_name (str): @@ -389,11 +388,11 @@ class CategoricalArrayTransformation(proto.Message): r"""Treats the column as categorical array and performs following transformation functions. - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays treated as an embedding of zeroes. Attributes: column_name (str): @@ -409,10 +408,10 @@ class TextArrayTransformation(proto.Message): r"""Treats the column as text array and performs following transformation functions. - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. 
+ - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as a + single text value. Apply the transformations for Text columns. + - Empty arrays treated as an empty text. Attributes: column_name (str): @@ -460,13 +459,17 @@ class TextArrayTransformation(proto.Message): oneof="transformation_detail", message="AutoMlTablesInputs.Transformation.TextTransformation", ) - repeated_numeric: "AutoMlTablesInputs.Transformation.NumericArrayTransformation" = proto.Field( + repeated_numeric: ( + "AutoMlTablesInputs.Transformation.NumericArrayTransformation" + ) = proto.Field( proto.MESSAGE, number=6, oneof="transformation_detail", message="AutoMlTablesInputs.Transformation.NumericArrayTransformation", ) - repeated_categorical: "AutoMlTablesInputs.Transformation.CategoricalArrayTransformation" = proto.Field( + repeated_categorical: ( + "AutoMlTablesInputs.Transformation.CategoricalArrayTransformation" + ) = proto.Field( proto.MESSAGE, number=7, oneof="transformation_detail", @@ -520,7 +523,9 @@ class TextArrayTransformation(proto.Message): proto.STRING, number=9, ) - export_evaluated_data_items_config: gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig = proto.Field( + export_evaluated_data_items_config: ( + gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig + ) = proto.Field( proto.MESSAGE, number=10, message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py index ac98071d1e..e85434595e 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py 
@@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py index 2f02aee517..b7c530fbf8 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py index bdffdef1a6..c853f83e09 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py index 9a8ad78ebb..e7ce5d8db3 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -84,23 +84,23 @@ class AutoMlForecastingInputs(proto.Message): The supported optimization objectives: - - "minimize-rmse" (default) - Minimize root-mean-squared - error (RMSE). + - "minimize-rmse" (default) - Minimize root-mean-squared + error (RMSE). - - "minimize-mae" - Minimize mean-absolute error (MAE). + - "minimize-mae" - Minimize mean-absolute error (MAE). - - "minimize-rmsle" - Minimize root-mean-squared log error - (RMSLE). + - "minimize-rmsle" - Minimize root-mean-squared log error + (RMSLE). - - "minimize-rmspe" - Minimize root-mean-squared percentage - error (RMSPE). + - "minimize-rmspe" - Minimize root-mean-squared percentage + error (RMSPE). - - "minimize-wape-mae" - Minimize the combination of - weighted absolute percentage error (WAPE) and - mean-absolute-error (MAE). + - "minimize-wape-mae" - Minimize the combination of weighted + absolute percentage error (WAPE) and mean-absolute-error + (MAE). - - "minimize-quantile-loss" - Minimize the quantile loss at - the quantiles defined in ``quantiles``. + - "minimize-quantile-loss" - Minimize the quantile loss at + the quantiles defined in ``quantiles``. train_budget_milli_node_hours (int): Required. The train budget of creating this model, expressed in milli node hours i.e. 
1,000 @@ -175,11 +175,11 @@ class AutoMlForecastingInputs(proto.Message): Validation options for the data validation component. The available options are: - - "fail-pipeline" - default, will validate against the - validation and fail the pipeline if it fails. + - "fail-pipeline" - default, will validate against the + validation and fail the pipeline if it fails. - - "ignore-validation" - ignore the results of the - validation and continue + - "ignore-validation" - ignore the results of the validation + and continue additional_experiments (MutableSequence[str]): Additional experiment flags for the time series forcasting training. @@ -230,19 +230,19 @@ class AutoTransformation(proto.Message): class NumericTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The value converted to float32. + - The value converted to float32. - - The z_score of the value. + - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. + - z_score of log(value+1) when the value is greater than or equal to + 0. Otherwise, this transformation is not applied and the value is + considered a missing value. - - A boolean value that indicates whether the value is valid. + - A boolean value that indicates whether the value is valid. Attributes: column_name (str): @@ -257,15 +257,15 @@ class NumericTransformation(proto.Message): class CategoricalTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. 
- - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category gets + its own special lookup index and resulting embedding. Attributes: column_name (str): @@ -280,14 +280,14 @@ class CategoricalTransformation(proto.Message): class TimestampTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - Apply the transformation functions for Numerical columns. + - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the timestamp as a Categorical column. + - Determine the year, month, day,and weekday. Treat each value from + the timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. + - Invalid numerical values (for example, values that fall outside of + a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. Attributes: column_name (str): @@ -296,13 +296,13 @@ class TimestampTransformation(proto.Message): The format in which that time field is expressed. 
The time_format must either be one of: - - ``unix-seconds`` + - ``unix-seconds`` - - ``unix-milliseconds`` + - ``unix-milliseconds`` - - ``unix-microseconds`` + - ``unix-microseconds`` - - ``unix-nanoseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); @@ -326,11 +326,11 @@ class TimestampTransformation(proto.Message): class TextTransformation(proto.Message): r"""Training pipeline will perform following transformation functions. - - The text as is--no change to case, punctuation, spelling, tense, - and so on. + - The text as is--no change to case, punctuation, spelling, tense, + and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. Attributes: column_name (str): @@ -356,17 +356,21 @@ class TextTransformation(proto.Message): message="AutoMlForecastingInputs.Transformation.NumericTransformation", ) ) - categorical: "AutoMlForecastingInputs.Transformation.CategoricalTransformation" = proto.Field( + categorical: ( + "AutoMlForecastingInputs.Transformation.CategoricalTransformation" + ) = proto.Field( proto.MESSAGE, number=3, oneof="transformation_detail", message="AutoMlForecastingInputs.Transformation.CategoricalTransformation", ) - timestamp: "AutoMlForecastingInputs.Transformation.TimestampTransformation" = proto.Field( - proto.MESSAGE, - number=4, - oneof="transformation_detail", - message="AutoMlForecastingInputs.Transformation.TimestampTransformation", + timestamp: "AutoMlForecastingInputs.Transformation.TimestampTransformation" = ( + proto.Field( + proto.MESSAGE, + number=4, + oneof="transformation_detail", + message="AutoMlForecastingInputs.Transformation.TimestampTransformation", + ) ) text: "AutoMlForecastingInputs.Transformation.TextTransformation" = proto.Field( proto.MESSAGE, @@ -383,17 +387,17 @@ class 
Granularity(proto.Message): The time granularity unit of this time period. The supported units are: - - "minute" + - "minute" - - "hour" + - "hour" - - "day" + - "day" - - "week" + - "week" - - "month" + - "month" - - "year". + - "year". quantity (int): The number of granularity_units between data points in the training data. If ``granularity_unit`` is ``minute``, can be @@ -464,7 +468,9 @@ class Granularity(proto.Message): proto.INT64, number=24, ) - export_evaluated_data_items_config: gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig = proto.Field( + export_evaluated_data_items_config: ( + gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig + ) = proto.Field( proto.MESSAGE, number=15, message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py index b5d5aae72d..807ec63644 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -81,6 +81,7 @@ class ModelType(proto.Enum): TensorFlow Lite model and used on a Coral device afterwards. 
""" + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py index e10e8e8960..3010649dcc 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -75,6 +75,7 @@ class ModelType(proto.Enum): ModelService.ExportModel) to a Jetson device afterwards. """ + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py index 6569bec4f9..936069d754 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -88,6 +88,7 @@ class ModelType(proto.Enum): ModelService.ExportModel) and used on an NVIDIA Jetson device. 
""" + MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py index bd7683d9e1..3395793976 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ class ExportEvaluatedDataItemsConfig(proto.Message): If not specified, then results are exported to the following auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples + :export_evaluated_examples\_\_.evaluated_examples override_existing_table (bool): If true and an export destination is specified, then the contents of the destination diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 641185f0de..82937473e5 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. # -__version__ = "1.74.0" +__version__ = "1.148.1" diff --git a/google/cloud/aiplatform/vertex_ray/__init__.py b/google/cloud/aiplatform/vertex_ray/__init__.py index d44741030a..ffc93cc07c 100644 --- a/google/cloud/aiplatform/vertex_ray/__init__.py +++ b/google/cloud/aiplatform/vertex_ray/__init__.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -46,9 +46,9 @@ get_job_submission_client_cluster_info, ) -if sys.version_info[1] != 10: +if sys.version_info[1] not in (10, 11): print( - "[Ray on Vertex]: The client environment with Python version 3.10 is required." + "[Ray on Vertex]: The client environment with Python version 3.10 or 3.11 is required." ) __all__ = ( diff --git a/google/cloud/aiplatform/vertex_ray/bigquery_datasink.py b/google/cloud/aiplatform/vertex_ray/bigquery_datasink.py index 118b8bc71f..155fd171a1 100644 --- a/google/cloud/aiplatform/vertex_ray/bigquery_datasink.py +++ b/google/cloud/aiplatform/vertex_ray/bigquery_datasink.py @@ -51,7 +51,7 @@ ) -# BigQuery write for Ray 2.33.0 and 2.9.3 +# BigQuery write for Ray 2.47.1, 2.42.0, 2.33.0, and 2.9.3 if Datasink is None: _BigQueryDatasink = None else: diff --git a/google/cloud/aiplatform/vertex_ray/client_builder.py b/google/cloud/aiplatform/vertex_ray/client_builder.py index ce745435c1..effe04427c 100644 --- a/google/cloud/aiplatform/vertex_ray/client_builder.py +++ b/google/cloud/aiplatform/vertex_ray/client_builder.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -47,7 +47,7 @@ def __init__( persistent_resource_id, " failed to start Head node properly.", ) - if ray.__version__ == "2.33.0": + if ray.__version__ in ("2.47.1", "2.42.0", "2.33.0"): super().__init__( dashboard_url=dashboard_uri, python_version=ray_client_context.python_version, @@ -69,7 +69,7 @@ def __init__( else: raise ImportError( f"[Ray on Vertex AI]: Unsupported version {ray.__version__}." - + "Only 2.33.0 and 2.9.3 are supported." + + "Only 2.47.1, 2.42.0, 2.33.0, and 2.9.3 are supported." 
) self.persistent_resource_id = persistent_resource_id self.vertex_sdk_version = str(VERTEX_SDK_VERSION) diff --git a/google/cloud/aiplatform/vertex_ray/cluster_init.py b/google/cloud/aiplatform/vertex_ray/cluster_init.py index eec45e5d88..357a6d0f4e 100644 --- a/google/cloud/aiplatform/vertex_ray/cluster_init.py +++ b/google/cloud/aiplatform/vertex_ray/cluster_init.py @@ -53,8 +53,8 @@ def create_ray_cluster( head_node_type: Optional[resources.Resources] = resources.Resources(), - python_version: Optional[str] = "3.10", - ray_version: Optional[str] = "2.33", + python_version: Optional[str] = None, + ray_version: Optional[str] = "2.47", network: Optional[str] = None, service_account: Optional[str] = None, cluster_name: Optional[str] = None, @@ -76,17 +76,17 @@ def create_ray_cluster( head_node_type = Resources( machine_type="n1-standard-8", node_count=1, - accelerator_type="NVIDIA_TESLA_K80", + accelerator_type="NVIDIA_TESLA_T4", accelerator_count=1, - custom_image="us-docker.pkg.dev/my-project/ray-cpu-image.2.9:latest", # Optional + custom_image="us-docker.pkg.dev/my-project/ray-2-47-cpu-py3.11:latest", # Optional ) worker_node_types = [Resources( machine_type="n1-standard-8", node_count=2, - accelerator_type="NVIDIA_TESLA_K80", + accelerator_type="NVIDIA_TESLA_T4", accelerator_count=1, - custom_image="us-docker.pkg.dev/my-project/ray-gpu-image.2.9:latest", # Optional + custom_image="us-docker.pkg.dev/my-project/ray-2-47-gpu-py3.11:latest", # Optional )] cluster_resource_name = vertex_ray.create_ray_cluster( @@ -95,7 +95,7 @@ def create_ray_cluster( service_account="my-service-account@my-project-number.iam.gserviceaccount.com", # Optional cluster_name="my-cluster-name", # Optional worker_node_types=worker_node_types, - ray_version="2.9", + ray_version="2.47", ) After a ray cluster is set up, you can call @@ -109,7 +109,7 @@ def create_ray_cluster( head_node_type: The head node resource. Resources.node_count must be 1. 
If not set, default value of Resources() class will be used. python_version: Python version for the ray cluster. - ray_version: Ray version for the ray cluster. Default is 2.33.0. + ray_version: Ray version for the ray cluster. Default is 2.47.1. network: Virtual private cloud (VPC) network. For Ray Client, VPC peering is required to connect to the Ray Cluster managed in the Vertex API service. For Ray Job API, VPC network is not required @@ -162,7 +162,7 @@ def create_ray_cluster( local_ray_verion = _validation_utils.get_local_ray_version() if ray_version != local_ray_verion: if custom_images is None and head_node_type.custom_image is None: - install_ray_version = "2.33.0" + install_ray_version = "2.47.1" logging.info( "[Ray on Vertex]: Local runtime has Ray version %s" ", but the requested cluster runtime has %s. Please " @@ -540,9 +540,9 @@ def update_ray_cluster( not_merged = 0 else: # No duplication w/ head node, write the 2nd worker node to the 2nd resource pool. - current_persistent_resource.resource_pools[ - i + not_merged - ].replica_count = worker_node_types[i].node_count + current_persistent_resource.resource_pools[i + not_merged].replica_count = ( + worker_node_types[i].node_count + ) # New worker_node_type.node_count should be >=1 unless the worker_node_type # and head_node_type are merged due to the same machine specs. if worker_node_types[i].node_count == 0: diff --git a/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py b/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py index 680e772724..b395f11dde 100644 --- a/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py +++ b/google/cloud/aiplatform/vertex_ray/dashboard_sdk.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -29,15 +29,15 @@ def get_job_submission_client_cluster_info( """A vertex_ray implementation of get_job_submission_client_cluster_info(). Implements - https://github.com/ray-project/ray/blob/ray-2.33.0/python/ray/dashboard/modules/dashboard_sdk.py#L84 + https://github.com/ray-project/ray/blob/ray-2.47.1/python/ray/dashboard/modules/dashboard_sdk.py#L84 This will be called in from Ray Job API Python client. Args: address: Address without the module prefix `vertex_ray` but otherwise the same format as passed to ray.init(address="vertex_ray://..."). - *args: Reminder of positional args that might be passed down from + *args: Remainder of positional args that might be passed down from the framework. - **kwargs: Reminder of keyword args that might be passed down from + **kwargs: Remainder of keyword args that might be passed down from the framework. Returns: diff --git a/google/cloud/aiplatform/vertex_ray/data.py b/google/cloud/aiplatform/vertex_ray/data.py index 39ab572c98..217eb52106 100644 --- a/google/cloud/aiplatform/vertex_ray/data.py +++ b/google/cloud/aiplatform/vertex_ray/data.py @@ -57,26 +57,25 @@ def read_bigquery( dataset: The name of the dataset hosted in BigQuery in the format of ``dataset_id.table_id``. Both the dataset_id and table_id must exist otherwise an exception will be raised. - query: The query to execute. - The dataset is created from the results of executing the query if provided. - Otherwise, the entire dataset is read. For query syntax guidelines, see + query: The query to execute. The dataset is created from the results of + executing the query if provided. Otherwise, the entire dataset is read. + For query syntax guidelines, see https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax - parallelism: - 2.9.3: The requested parallelism of the read. If -1, it will be - automatically chosen based on the available cluster resources - and estimated in-memory data size. - 2.33.0: This argument is deprecated. 
Use ``override_num_blocks`` argument. + parallelism: 2.33.0, 2.42.0, 2.47.1: This argument is deprecated. Use + ``override_num_blocks`` argument. 2.9.3: The requested parallelism of + the read. If -1, it will be automatically chosen based on the available + cluster resources and estimated in-memory data size. ray_remote_args: kwargs passed to ray.remote in the read tasks. - concurrency: Not supported in 2.9.3. - 2.33.0: The maximum number of Ray tasks to run concurrently. Set this - to control number of tasks to run concurrently. This doesn't change the - total number of tasks run or the total number of output blocks. By default, - concurrency is dynamically decided based on the available resources. - override_num_blocks: Not supported in 2.9.3. - 2.33.0: Override the number of output blocks from all read tasks. - By default, the number of output blocks is dynamically decided based on - input data size and available resources. You shouldn't manually set this - value in most cases. + concurrency: Supported for 2.33.0, 2.42.0 and 2.47.1 only: The maximum number of + Ray tasks to run concurrently. Set this to control number of tasks to + run concurrently. This doesn't change the total number of tasks run or + the total number of output blocks. By default, concurrency is + dynamically decided based on the available resources. + override_num_blocks: Supported for 2.33.0, 2.42.0 and 2.47.1 only: Override the + number of output blocks from all read tasks. By default, the number of + output blocks is dynamically decided based on input data size and + available resources. You shouldn't manually set this value in most + cases. 
Returns: Dataset producing rows from the results of executing the query @@ -96,7 +95,7 @@ def read_bigquery( parallelism=parallelism, ray_remote_args=ray_remote_args, ) - elif ray.__version__ == "2.33.0": + elif ray.__version__ in ("2.33.0", "2.42.0", "2.47.1"): return ray.data.read_datasource( datasource=datasource, parallelism=parallelism, @@ -107,7 +106,7 @@ def read_bigquery( else: raise ImportError( f"[Ray on Vertex AI]: Unsupported version {ray.__version__}." - + "Only 2.33.0 and 2.9.3 are supported." + + "Only 2.47.1, 2.42.0, 2.33.0, and 2.9.3 are supported." ) @@ -135,19 +134,20 @@ def write_bigquery( The default number of retries is 10. ray_remote_args: kwargs passed to ray.remote in the write tasks. overwrite_table: Not supported in 2.9.3. - 2.33.0: Whether the write will overwrite the table if it already - exists. The default behavior is to overwrite the table. + 2.33.0, 2.42.0, 2.47.1: Whether the write will overwrite the table + if it already exists. The default behavior is to overwrite the table. If false, will append to the table if it exists. concurrency: Not supported in 2.9.3. - 2.33.0: The maximum number of Ray tasks to run concurrently. Set this - to control number of tasks to run concurrently. This doesn't change the - total number of tasks run or the total number of output blocks. By default, - concurrency is dynamically decided based on the available resources. + 2.33.0, 2.42.0, 2.47.1: The maximum number of Ray tasks to run concurrently. + Set this to control number of tasks to run concurrently. This doesn't + change the total number of tasks run or the total number of output blocks. + By default, concurrency is dynamically decided based on the available + resources. 
""" if ray.__version__ == "2.4.0": raise RuntimeError(_V2_4_WARNING_MESSAGE) - elif ray.__version__ == "2.9.3" or ray.__version__ == "2.33.0": + elif ray.__version__ in ("2.9.3", "2.33.0", "2.42.0", "2.47.1"): if ray.__version__ == "2.9.3": warnings.warn(_V2_9_WARNING_MESSAGE, DeprecationWarning, stacklevel=1) if ray_remote_args is None: @@ -174,7 +174,7 @@ def write_bigquery( datasink=datasink, ray_remote_args=ray_remote_args, ) - elif ray.__version__ == "2.33.0": + elif ray.__version__ in ("2.33.0", "2.42.0", "2.47.1"): datasink = _BigQueryDatasink( project_id=project_id, dataset=dataset, @@ -189,5 +189,5 @@ def write_bigquery( else: raise ImportError( f"[Ray on Vertex AI]: Unsupported version {ray.__version__}." - + "Only 2.33.0 and 2.9.3 are supported." + + "Only 2.47.1, 2.42.0, 2.33.0 and 2.9.3 are supported." ) diff --git a/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py b/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py index 2eed3158b0..fb6c2333ed 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py +++ b/google/cloud/aiplatform/vertex_ray/predict/sklearn/register.py @@ -45,12 +45,15 @@ if TYPE_CHECKING: import sklearn -except ModuleNotFoundError as mnfe: - raise ModuleNotFoundError("Sklearn isn't installed.") from mnfe +except ImportError as ie: + if ray.__version__ < "2.42.0": + raise ModuleNotFoundError("Sklearn isn't installed.") from ie + else: + sklearn = None def register_sklearn( - checkpoint: ray_sklearn.SklearnCheckpoint, + checkpoint: "ray_sklearn.SklearnCheckpoint", artifact_uri: Optional[str] = None, display_name: Optional[str] = None, **kwargs, @@ -120,7 +123,7 @@ def register_sklearn( def _get_estimator_from( - checkpoint: ray_sklearn.SklearnCheckpoint, + checkpoint: "ray_sklearn.SklearnCheckpoint", ) -> "sklearn.base.BaseEstimator": """Converts a SklearnCheckpoint to sklearn estimator. 
diff --git a/google/cloud/aiplatform/vertex_ray/predict/torch/register.py b/google/cloud/aiplatform/vertex_ray/predict/torch/register.py index 550f81a9d2..07b25ed72c 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/torch/register.py +++ b/google/cloud/aiplatform/vertex_ray/predict/torch/register.py @@ -1,4 +1,5 @@ """Regsiter Torch for Ray on Vertex AI.""" + # -*- coding: utf-8 -*- # Copyright 2023 Google LLC @@ -90,14 +91,18 @@ def get_pytorch_model_from( raise ModuleNotFoundError("PyTorch isn't installed.") from mnfe if os.path.exists(model_path): - model_or_state_dict = torch.load(model_path, map_location="cpu") + model_or_state_dict = torch.load( + model_path, map_location="cpu", weights_only=True + ) else: try: # Download from GCS to temp and then load_model with tempfile.TemporaryDirectory() as temp_dir: gcs_utils.download_from_gcs("gs://" + checkpoint.path, temp_dir) model_or_state_dict = torch.load( - f"{temp_dir}/{model_file_name}", map_location="cpu" + f"{temp_dir}/{model_file_name}", + map_location="cpu", + weights_only=True, ) except Exception as e: raise RuntimeError( diff --git a/google/cloud/aiplatform/vertex_ray/predict/util/predict_utils.py b/google/cloud/aiplatform/vertex_ray/predict/util/predict_utils.py index a18fe4cabd..e105dbd212 100644 --- a/google/cloud/aiplatform/vertex_ray/predict/util/predict_utils.py +++ b/google/cloud/aiplatform/vertex_ray/predict/util/predict_utils.py @@ -15,8 +15,7 @@ # limitations under the License. # -"""Predict Utils. 
-""" +"""Predict Utils.""" def validate_artifact_uri(artifact_uri: str) -> None: diff --git a/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py b/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py index 8ed158e012..e84162c45a 100644 --- a/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py +++ b/google/cloud/aiplatform/vertex_ray/util/_gapic_utils.py @@ -107,21 +107,33 @@ def get_persistent_resource( response = None if num_attempts >= tolerance: raise ValueError( - "[Ray on Vertex AI]: Invalid cluster_resource_name (404 not found)." + "[Ray on Vertex AI]: Invalid cluster_resource_name %s (404 not" + " found)." % persistent_resource_name ) if response: if response.error.message: - logging.error("[Ray on Vertex AI]: %s" % response.error.message) - raise RuntimeError("[Ray on Vertex AI]: Cluster returned an error.") + logging.error( + "[Ray on Vertex AI]: %s - %s", + persistent_resource_name, + response.error.message, + ) + raise RuntimeError( + "[Ray on Vertex AI]: Cluster %s returned an error." + % persistent_resource_name + ) print("[Ray on Vertex AI]: Cluster State =", response.state) if response.state == PersistentResource.State.RUNNING: return response elif response.state == PersistentResource.State.STOPPING: - raise RuntimeError("[Ray on Vertex AI]: The cluster is stopping.") + raise RuntimeError( + "[Ray on Vertex AI]: Cluster %s is stopping." + % persistent_resource_name + ) elif response.state == PersistentResource.State.ERROR: raise RuntimeError( - "[Ray on Vertex AI]: The cluster encountered an error." + "[Ray on Vertex AI]: Cluster %s encountered an error." 
+ % persistent_resource_name ) # Polling decay sleep_time = polling_delay(num_attempts=num_attempts, time_scale=150.0) diff --git a/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py b/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py index c408f2ae3a..5c2a833aa7 100644 --- a/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py +++ b/google/cloud/aiplatform/vertex_ray/util/_validation_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,15 +25,22 @@ from google.cloud.aiplatform import initializer from google.cloud.aiplatform.utils import resource_manager_utils -SUPPORTED_RAY_VERSIONS = immutabledict({"2.9": "2.9.3", "2.33": "2.33.0"}) -SUPPORTED_PY_VERSION = ["3.10"] +SUPPORTED_RAY_VERSIONS = immutabledict( + {"2.9": "2.9.3", "2.33": "2.33.0", "2.42": "2.42.0", "2.47": "2.47.1"} +) +SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS = immutabledict( + { + "3.10": ("2.9", "2.33", "2.42"), + "3.11": ("2.42", "2.47"), + } +) _V2_4_WARNING_MESSAGE = ( - "After google-cloud-aiplatform>1.53.0, using Ray version = 2.4 will result in an error. " - "Please use Ray version = 2.33.0 (default)." + "After google-cloud-aiplatform>1.53.0, using Ray version = 2.4 will result in " + "an error. Please use Ray version = 2.33.0, 2.42.0 or 2.47.1 (default) instead." ) _V2_9_WARNING_MESSAGE = ( "In March 2025, using Ray version = 2.9 will result in an error. " - "Please use Ray version = 2.33.0 (default) instead." + "Please use Ray version = 2.33.0, 2.42.0 or 2.47.1 (default) instead." 
) @@ -100,18 +107,35 @@ def get_image_uri(ray_version, python_version, enable_cuda): list(SUPPORTED_RAY_VERSIONS.values())[1], ) ) - if python_version not in SUPPORTED_PY_VERSION: - raise ValueError("[Ray on Vertex AI]: The supported Python version is 3.10.") + if python_version is None: + for pv, ray_versions in SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS.items(): + if ray_version in ray_versions: + python_version = pv + break + if python_version not in SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS: + raise ValueError( + "[Ray on Vertex AI]: The supported Python versions are 3.10 or 3.11." + ) + + if ray_version not in SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS[python_version]: + raise ValueError( + "[Ray on Vertex AI]: The supported Ray version(s) for Python version %s: %s." + % ( + python_version, + SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS[python_version], + ) + ) location = initializer.global_config.location region = location.split("-")[0] if region not in _AVAILABLE_REGIONS: region = _DEFAULT_REGION ray_version = ray_version.replace(".", "-") + python_version = python_version.replace(".", "") if enable_cuda: - return f"{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.{ray_version}.py310:latest" + return f"{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.{ray_version}.py{python_version}:latest" else: - return f"{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.{ray_version}.py310:latest" + return f"{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.{ray_version}.py{python_version}:latest" def get_versions_from_image_uri(image_uri): @@ -120,7 +144,10 @@ def get_versions_from_image_uri(image_uri): image_label = image_uri.split("/")[-1].split(":")[0] py_version = image_label[-3] + "." 
+ image_label[-2:] ray_version = image_label.split(".")[1].replace("-", ".") - if ray_version in SUPPORTED_RAY_VERSIONS and py_version in SUPPORTED_PY_VERSION: + if ( + py_version in SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS + and ray_version in SUPPORTED_RAY_VERSIONS_FROM_PYTHON_VERSIONS[py_version] + ): return py_version, ray_version else: # May not parse custom image and get the versions correctly diff --git a/google/cloud/aiplatform/vertex_ray/util/resources.py b/google/cloud/aiplatform/vertex_ray/util/resources.py index 901c7c0a4f..a9143a4dfd 100644 --- a/google/cloud/aiplatform/vertex_ray/util/resources.py +++ b/google/cloud/aiplatform/vertex_ray/util/resources.py @@ -69,17 +69,24 @@ class Resources: class NodeImages: """Custom images for a ray cluster. - We currently support Ray v2.9 and v2.33 and python v3.10. + We currently support Ray v2.9, v2.33, v2.42 and python v3.10. + We also support python v3.11 for Ray v2.42 and v2.47. The custom images must be extended from the following base images: "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-9.py310:latest", "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-9.py310:latest", - "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-33.py310:latest", or - "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-33.py310:latest". In + "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-33.py310:latest", + "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-33.py310:latest", + "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-42.py310:latest", + "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-42.py310:latest", + "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-42.py311:latest", + "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-42.py311:latest". + "{region}-docker.pkg.dev/vertex-ai/training/ray-cpu.2-47.py311:latest", or + "{region}-docker.pkg.dev/vertex-ai/training/ray-gpu.2-47.py311:latest". In order to use custom images, need to specify both head and worker images. 
Attributes: - head: image for head node (eg. us-docker.pkg.dev/my-project/ray-cpu.2-9.py310-tf:latest). - worker: image for all worker nodes (eg. us-docker.pkg.dev/my-project/ray-gpu.2-9.py310-tf:latest). + head: image for head node (eg. us-docker.pkg.dev/my-project/ray-cpu.2-47.py311:latest). + worker: image for all worker nodes (eg. us-docker.pkg.dev/my-project/ray-gpu.2-47.py311:latest). """ head: str = None @@ -146,8 +153,8 @@ class Cluster: service_account: Service account to be used for running Ray programs on the cluster. state: Describes the cluster state (defined in PersistentResource.State). - python_version: Python version for the ray cluster (e.g. "3.10"). - ray_version: Ray version for the ray cluster (e.g. "2.9"). + python_version: Python version for the ray cluster (e.g. "3.11"). + ray_version: Ray version for the ray cluster (e.g. "2.47"). head_node_type: The head node resource. Resources.node_count must be 1. If not set, by default it is a CPU node with machine_type of n1-standard-8. worker_node_types: The list of Resources of the worker nodes. 
Should not diff --git a/google/cloud/aiplatform/vizier/pyvizier/__init__.py b/google/cloud/aiplatform/vizier/pyvizier/__init__.py index fa2fab13c7..458da46301 100644 --- a/google/cloud/aiplatform/vizier/pyvizier/__init__.py +++ b/google/cloud/aiplatform/vizier/pyvizier/__init__.py @@ -36,7 +36,9 @@ 'Please install the SDK using "pip install google-vizier"' ) -from google.cloud.aiplatform.vizier.pyvizier.proto_converters import TrialConverter +from google.cloud.aiplatform.vizier.pyvizier.proto_converters import ( + TrialConverter, +) from google.cloud.aiplatform.vizier.pyvizier.proto_converters import ( ParameterConfigConverter, ) diff --git a/google/cloud/aiplatform/vizier/pyvizier/automated_stopping.py b/google/cloud/aiplatform/vizier/pyvizier/automated_stopping.py index b46f2b12ec..25a8302e93 100644 --- a/google/cloud/aiplatform/vizier/pyvizier/automated_stopping.py +++ b/google/cloud/aiplatform/vizier/pyvizier/automated_stopping.py @@ -1,4 +1,5 @@ """Convenience classes for configuring Vizier Early-Stopping Configs.""" + import copy from typing import Union diff --git a/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py b/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py index eb5a9292b6..688c007247 100644 --- a/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py +++ b/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py @@ -1,4 +1,5 @@ """Converters for OSS Vizier's protos from/to PyVizier's classes.""" + import logging from datetime import timezone from typing import List, Optional, Sequence, Tuple, Union @@ -234,13 +235,13 @@ def _set_child_parameter_configs( ) if "discrete_value_spec" in parent_proto: - conditional_parameter_spec.parent_discrete_values.values[ - : - ] = parent_values + conditional_parameter_spec.parent_discrete_values.values[:] = ( + parent_values + ) elif "categorical_value_spec" in parent_proto: - conditional_parameter_spec.parent_categorical_values.values[ - : - ] = parent_values + 
conditional_parameter_spec.parent_categorical_values.values[:] = ( + parent_values + ) elif "integer_value_spec" in parent_proto: conditional_parameter_spec.parent_int_values.values[:] = parent_values else: diff --git a/google/cloud/aiplatform/vizier/pyvizier/study_config.py b/google/cloud/aiplatform/vizier/pyvizier/study_config.py index 0940e56767..73e84ab26d 100644 --- a/google/cloud/aiplatform/vizier/pyvizier/study_config.py +++ b/google/cloud/aiplatform/vizier/pyvizier/study_config.py @@ -12,6 +12,7 @@ * `SearchSpace` and `SearchSpaceSelector` classes deals with Vizier search spaces. Both flat spaces and conditional parameters are supported. """ + import collections import copy import enum @@ -385,9 +386,9 @@ def _pytrial_parameters(self, pytrial: Trial) -> Dict[str, ParameterValueSequenc ValueError: If the trial parameters do not exist in this search space. ValueError: If the trial contains duplicate parameters. """ - trial_external_values: Dict[ - str, Union[float, int, str, bool] - ] = self._trial_to_external_values(pytrial) + trial_external_values: Dict[str, Union[float, int, str, bool]] = ( + self._trial_to_external_values(pytrial) + ) if len(trial_external_values) != len(pytrial.parameters): raise ValueError( "Invalid trial for this search space: failed to convert " diff --git a/google/cloud/aiplatform/vizier/trial.py b/google/cloud/aiplatform/vizier/trial.py index 310f08e36b..1b240f9f69 100644 --- a/google/cloud/aiplatform/vizier/trial.py +++ b/google/cloud/aiplatform/vizier/trial.py @@ -84,11 +84,13 @@ def __init__( ) self._gca_resource = self._get_gca_resource( resource_name=trial_name, - parent_resource_name_fields={ - study.Study._resource_noun: study_id, - } - if study_id - else study_id, + parent_resource_name_fields=( + { + study.Study._resource_noun: study_id, + } + if study_id + else study_id + ), ) @property @@ -136,9 +138,9 @@ def complete( complete_trial_request["infeasible_reason"] = infeasible_reason 
complete_trial_request["trial_infeasible"] = True if measurement is not None: - complete_trial_request[ - "final_measurement" - ] = vz.MeasurementConverter.to_proto(measurement) + complete_trial_request["final_measurement"] = ( + vz.MeasurementConverter.to_proto(measurement) + ) trial = self.api_client.complete_trial(request=complete_trial_request) return ( vz.MeasurementConverter.from_proto(trial.final_measurement) diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 28e1743e46..b28c7f0dd6 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,9 +15,21 @@ # from google.cloud.aiplatform_v1 import gapic_version as package_version +import google.api_core as api_core +import sys + __version__ = package_version.__version__ +if sys.version_info >= (3, 8): # pragma: NO COVER + from importlib import metadata +else: # pragma: NO COVER + # TODO(https://github.com/googleapis/python-api-core/issues/835): Remove + # this code path once we drop support for Python 3.7 + import importlib_metadata as metadata + +from .services.data_foundry_service import DataFoundryServiceClient +from .services.data_foundry_service import DataFoundryServiceAsyncClient from .services.dataset_service import DatasetServiceClient from .services.dataset_service import DatasetServiceAsyncClient from .services.deployment_resource_pool_service import ( @@ -48,6 +60,8 @@ ) from .services.featurestore_service import FeaturestoreServiceClient from .services.featurestore_service import FeaturestoreServiceAsyncClient +from .services.gen_ai_cache_service import GenAiCacheServiceClient +from .services.gen_ai_cache_service import GenAiCacheServiceAsyncClient from 
.services.gen_ai_tuning_service import GenAiTuningServiceClient from .services.gen_ai_tuning_service import GenAiTuningServiceAsyncClient from .services.index_endpoint_service import IndexEndpointServiceClient @@ -76,8 +90,18 @@ from .services.pipeline_service import PipelineServiceAsyncClient from .services.prediction_service import PredictionServiceClient from .services.prediction_service import PredictionServiceAsyncClient +from .services.reasoning_engine_execution_service import ( + ReasoningEngineExecutionServiceClient, +) +from .services.reasoning_engine_execution_service import ( + ReasoningEngineExecutionServiceAsyncClient, +) +from .services.reasoning_engine_service import ReasoningEngineServiceClient +from .services.reasoning_engine_service import ReasoningEngineServiceAsyncClient from .services.schedule_service import ScheduleServiceClient from .services.schedule_service import ScheduleServiceAsyncClient +from .services.session_service import SessionServiceClient +from .services.session_service import SessionServiceAsyncClient from .services.specialist_pool_service import SpecialistPoolServiceClient from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient from .services.tensorboard_service import TensorboardServiceClient @@ -95,6 +119,7 @@ from .types.api_auth import ApiAuth from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob +from .types.cached_content import CachedContent from .types.completion_stats import CompletionStats from .types.content import Blob from .types.content import Candidate @@ -106,15 +131,27 @@ from .types.content import GroundingChunk from .types.content import GroundingMetadata from .types.content import GroundingSupport +from .types.content import ImageConfig from .types.content import LogprobsResult +from .types.content import ModalityTokenCount +from .types.content import ModelArmorConfig +from .types.content import MultiSpeakerVoiceConfig from .types.content 
import Part +from .types.content import PrebuiltVoiceConfig +from .types.content import ReplicatedVoiceConfig from .types.content import RetrievalMetadata from .types.content import SafetyRating from .types.content import SafetySetting from .types.content import SearchEntryPoint from .types.content import Segment +from .types.content import SpeakerVoiceConfig +from .types.content import SpeechConfig +from .types.content import UrlContextMetadata +from .types.content import UrlMetadata from .types.content import VideoMetadata +from .types.content import VoiceConfig from .types.content import HarmCategory +from .types.content import Modality from .types.context import Context from .types.custom_job import ContainerSpec from .types.custom_job import CustomJob @@ -122,6 +159,12 @@ from .types.custom_job import PythonPackageSpec from .types.custom_job import Scheduling from .types.custom_job import WorkerPoolSpec +from .types.data_foundry_service import GenerateSyntheticDataRequest +from .types.data_foundry_service import GenerateSyntheticDataResponse +from .types.data_foundry_service import OutputFieldSpec +from .types.data_foundry_service import SyntheticExample +from .types.data_foundry_service import SyntheticField +from .types.data_foundry_service import TaskDescriptionStrategy from .types.data_item import DataItem from .types.data_labeling_job import ActiveLearningConfig from .types.data_labeling_job import DataLabelingJob @@ -183,13 +226,16 @@ UpdateDeploymentResourcePoolOperationMetadata, ) from .types.deployment_resource_pool_service import UpdateDeploymentResourcePoolRequest +from .types.deployment_stage import DeploymentStage from .types.encryption_spec import EncryptionSpec from .types.endpoint import ClientConnectionConfig from .types.endpoint import DeployedModel from .types.endpoint import Endpoint from .types.endpoint import FasterDeploymentConfig +from .types.endpoint import GenAiAdvancedFeaturesConfig from .types.endpoint import 
PredictRequestResponseLoggingConfig from .types.endpoint import PrivateEndpoints +from .types.endpoint import SpeculativeDecodingSpec from .types.endpoint_service import CreateEndpointOperationMetadata from .types.endpoint_service import CreateEndpointRequest from .types.endpoint_service import DeleteEndpointRequest @@ -210,9 +256,14 @@ from .types.endpoint_service import UpdateEndpointRequest from .types.entity_type import EntityType from .types.env_var import EnvVar +from .types.env_var import SecretEnvVar +from .types.env_var import SecretRef from .types.evaluated_annotation import ErrorAnalysisAnnotation from .types.evaluated_annotation import EvaluatedAnnotation from .types.evaluated_annotation import EvaluatedAnnotationExplanation +from .types.evaluation_service import AggregationOutput +from .types.evaluation_service import AggregationResult +from .types.evaluation_service import AutoraterConfig from .types.evaluation_service import BleuInput from .types.evaluation_service import BleuInstance from .types.evaluation_service import BleuMetricValue @@ -226,8 +277,14 @@ from .types.evaluation_service import CometInstance from .types.evaluation_service import CometResult from .types.evaluation_service import CometSpec +from .types.evaluation_service import ComputationBasedMetricSpec +from .types.evaluation_service import ContentMap +from .types.evaluation_service import CustomOutput +from .types.evaluation_service import CustomOutputFormatConfig +from .types.evaluation_service import EvaluateDatasetResponse from .types.evaluation_service import EvaluateInstancesRequest from .types.evaluation_service import EvaluateInstancesResponse +from .types.evaluation_service import EvaluationDataset from .types.evaluation_service import ExactMatchInput from .types.evaluation_service import ExactMatchInstance from .types.evaluation_service import ExactMatchMetricValue @@ -245,10 +302,15 @@ from .types.evaluation_service import GroundednessInstance from 
.types.evaluation_service import GroundednessResult from .types.evaluation_service import GroundednessSpec +from .types.evaluation_service import LLMBasedMetricSpec +from .types.evaluation_service import Metric +from .types.evaluation_service import MetricResult from .types.evaluation_service import MetricxInput from .types.evaluation_service import MetricxInstance from .types.evaluation_service import MetricxResult from .types.evaluation_service import MetricxSpec +from .types.evaluation_service import OutputConfig +from .types.evaluation_service import OutputInfo from .types.evaluation_service import PairwiseMetricInput from .types.evaluation_service import PairwiseMetricInstance from .types.evaluation_service import PairwiseMetricResult @@ -265,6 +327,7 @@ from .types.evaluation_service import PointwiseMetricInstance from .types.evaluation_service import PointwiseMetricResult from .types.evaluation_service import PointwiseMetricSpec +from .types.evaluation_service import PredefinedMetricSpec from .types.evaluation_service import QuestionAnsweringCorrectnessInput from .types.evaluation_service import QuestionAnsweringCorrectnessInstance from .types.evaluation_service import QuestionAnsweringCorrectnessResult @@ -281,6 +344,7 @@ from .types.evaluation_service import QuestionAnsweringRelevanceInstance from .types.evaluation_service import QuestionAnsweringRelevanceResult from .types.evaluation_service import QuestionAnsweringRelevanceSpec +from .types.evaluation_service import RawOutput from .types.evaluation_service import RougeInput from .types.evaluation_service import RougeInstance from .types.evaluation_service import RougeMetricValue @@ -374,8 +438,12 @@ from .types.feature_online_store_admin_service import UpdateFeatureViewOperationMetadata from .types.feature_online_store_admin_service import UpdateFeatureViewRequest from .types.feature_online_store_service import FeatureViewDataKey +from .types.feature_online_store_service import 
FeatureViewDirectWriteRequest +from .types.feature_online_store_service import FeatureViewDirectWriteResponse from .types.feature_online_store_service import FetchFeatureValuesRequest from .types.feature_online_store_service import FetchFeatureValuesResponse +from .types.feature_online_store_service import GenerateFetchAccessTokenRequest +from .types.feature_online_store_service import GenerateFetchAccessTokenResponse from .types.feature_online_store_service import NearestNeighborQuery from .types.feature_online_store_service import NearestNeighbors from .types.feature_online_store_service import SearchNearestEntitiesRequest @@ -449,6 +517,12 @@ from .types.featurestore_service import UpdateFeatureRequest from .types.featurestore_service import UpdateFeaturestoreOperationMetadata from .types.featurestore_service import UpdateFeaturestoreRequest +from .types.gen_ai_cache_service import CreateCachedContentRequest +from .types.gen_ai_cache_service import DeleteCachedContentRequest +from .types.gen_ai_cache_service import GetCachedContentRequest +from .types.gen_ai_cache_service import ListCachedContentsRequest +from .types.gen_ai_cache_service import ListCachedContentsResponse +from .types.gen_ai_cache_service import UpdateCachedContentRequest from .types.genai_tuning_service import CancelTuningJobRequest from .types.genai_tuning_service import CreateTuningJobRequest from .types.genai_tuning_service import GetTuningJobRequest @@ -507,6 +581,8 @@ from .types.io import SharePointSources from .types.io import SlackSource from .types.io import TFRecordDestination +from .types.io import VertexMultimodalDatasetDestination +from .types.io import VertexMultimodalDatasetSource from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest from .types.job_service import CancelDataLabelingJobRequest @@ -561,6 +637,7 @@ from .types.machine_resources import BatchDedicatedResources from .types.machine_resources import 
DedicatedResources from .types.machine_resources import DiskSpec +from .types.machine_resources import LustreMount from .types.machine_resources import MachineSpec from .types.machine_resources import NfsMount from .types.machine_resources import PersistentDiskSpec @@ -630,6 +707,7 @@ from .types.migration_service import MigrateResourceResponse from .types.migration_service import SearchMigratableResourcesRequest from .types.migration_service import SearchMigratableResourcesResponse +from .types.model import Checkpoint from .types.model import GenieSource from .types.model import LargeModelReference from .types.model import Model @@ -655,6 +733,9 @@ ) from .types.model_evaluation import ModelEvaluation from .types.model_evaluation_slice import ModelEvaluationSlice +from .types.model_garden_service import DeployOperationMetadata +from .types.model_garden_service import DeployRequest +from .types.model_garden_service import DeployResponse from .types.model_garden_service import GetPublisherModelRequest from .types.model_garden_service import PublisherModelView from .types.model_monitoring import ModelMonitoringAlertConfig @@ -683,9 +764,12 @@ from .types.model_service import ListModelEvaluationsResponse from .types.model_service import ListModelsRequest from .types.model_service import ListModelsResponse +from .types.model_service import ListModelVersionCheckpointsRequest +from .types.model_service import ListModelVersionCheckpointsResponse from .types.model_service import ListModelVersionsRequest from .types.model_service import ListModelVersionsResponse from .types.model_service import MergeVersionAliasesRequest +from .types.model_service import ModelVersionCheckpoint from .types.model_service import UpdateExplanationDatasetOperationMetadata from .types.model_service import UpdateExplanationDatasetRequest from .types.model_service import UpdateExplanationDatasetResponse @@ -735,6 +819,9 @@ from .types.notebook_service import UpgradeNotebookRuntimeRequest from 
.types.notebook_service import UpgradeNotebookRuntimeResponse from .types.notebook_service import NotebookExecutionJobView +from .types.notebook_software_config import ColabImage +from .types.notebook_software_config import NotebookSoftwareConfig +from .types.notebook_software_config import PostStartupScriptConfig from .types.openapi import Schema from .types.openapi import Type from .types.operation import DeleteOperationMetadata @@ -787,6 +874,8 @@ from .types.prediction_service import DirectPredictResponse from .types.prediction_service import DirectRawPredictRequest from .types.prediction_service import DirectRawPredictResponse +from .types.prediction_service import EmbedContentRequest +from .types.prediction_service import EmbedContentResponse from .types.prediction_service import ExplainRequest from .types.prediction_service import ExplainResponse from .types.prediction_service import GenerateContentRequest @@ -804,6 +893,24 @@ from .types.prediction_service import StreamingRawPredictResponse from .types.prediction_service import StreamRawPredictRequest from .types.publisher_model import PublisherModel +from .types.reasoning_engine import ReasoningEngine +from .types.reasoning_engine import ReasoningEngineSpec +from .types.reasoning_engine_execution_service import ( + AsyncQueryReasoningEngineOperationMetadata, +) +from .types.reasoning_engine_execution_service import AsyncQueryReasoningEngineRequest +from .types.reasoning_engine_execution_service import AsyncQueryReasoningEngineResponse +from .types.reasoning_engine_execution_service import QueryReasoningEngineRequest +from .types.reasoning_engine_execution_service import QueryReasoningEngineResponse +from .types.reasoning_engine_execution_service import StreamQueryReasoningEngineRequest +from .types.reasoning_engine_service import CreateReasoningEngineOperationMetadata +from .types.reasoning_engine_service import CreateReasoningEngineRequest +from .types.reasoning_engine_service import 
DeleteReasoningEngineRequest +from .types.reasoning_engine_service import GetReasoningEngineRequest +from .types.reasoning_engine_service import ListReasoningEnginesRequest +from .types.reasoning_engine_service import ListReasoningEnginesResponse +from .types.reasoning_engine_service import UpdateReasoningEngineOperationMetadata +from .types.reasoning_engine_service import UpdateReasoningEngineRequest from .types.reservation_affinity import ReservationAffinity from .types.saved_query import SavedQuery from .types.schedule import Schedule @@ -815,9 +922,28 @@ from .types.schedule_service import PauseScheduleRequest from .types.schedule_service import ResumeScheduleRequest from .types.schedule_service import UpdateScheduleRequest +from .types.service_networking import DnsPeeringConfig from .types.service_networking import PrivateServiceConnectConfig from .types.service_networking import PscAutomatedEndpoints from .types.service_networking import PSCAutomationConfig +from .types.service_networking import PscInterfaceConfig +from .types.service_networking import PSCAutomationState +from .types.session import EventActions +from .types.session import EventMetadata +from .types.session import Session +from .types.session import SessionEvent +from .types.session import Transcription +from .types.session_service import AppendEventRequest +from .types.session_service import AppendEventResponse +from .types.session_service import CreateSessionOperationMetadata +from .types.session_service import CreateSessionRequest +from .types.session_service import DeleteSessionRequest +from .types.session_service import GetSessionRequest +from .types.session_service import ListEventsRequest +from .types.session_service import ListEventsResponse +from .types.session_service import ListSessionsRequest +from .types.session_service import ListSessionsResponse +from .types.session_service import UpdateSessionRequest from .types.specialist_pool import SpecialistPool from 
.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata from .types.specialist_pool_service import CreateSpecialistPoolRequest @@ -889,16 +1015,26 @@ from .types.tensorboard_service import WriteTensorboardRunDataRequest from .types.tensorboard_service import WriteTensorboardRunDataResponse from .types.tensorboard_time_series import TensorboardTimeSeries +from .types.tool import CodeExecutionResult from .types.tool import DynamicRetrievalConfig +from .types.tool import EnterpriseWebSearch +from .types.tool import ExecutableCode from .types.tool import FunctionCall from .types.tool import FunctionCallingConfig from .types.tool import FunctionDeclaration from .types.tool import FunctionResponse +from .types.tool import FunctionResponseBlob +from .types.tool import FunctionResponseFileData +from .types.tool import FunctionResponsePart +from .types.tool import GoogleMaps from .types.tool import GoogleSearchRetrieval +from .types.tool import PartialArg from .types.tool import RagRetrievalConfig from .types.tool import Retrieval +from .types.tool import RetrievalConfig from .types.tool import Tool from .types.tool import ToolConfig +from .types.tool import UrlContext from .types.tool import VertexAISearch from .types.tool import VertexRagStore from .types.training_pipeline import FilterSplit @@ -908,11 +1044,15 @@ from .types.training_pipeline import StratifiedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline +from .types.tuning_job import EvaluateDatasetRun +from .types.tuning_job import EvaluationConfig +from .types.tuning_job import PreTunedModel from .types.tuning_job import SupervisedHyperParameters from .types.tuning_job import SupervisedTuningDatasetDistribution from .types.tuning_job import SupervisedTuningDataStats from .types.tuning_job import SupervisedTuningSpec from .types.tuning_job import TunedModel +from .types.tuning_job import TunedModelCheckpoint from .types.tuning_job 
import TunedModelRef from .types.tuning_job import TuningDataStats from .types.tuning_job import TuningJob @@ -922,23 +1062,30 @@ from .types.types import StringArray from .types.types import Tensor from .types.unmanaged_container_model import UnmanagedContainerModel +from .types.usage_metadata import UsageMetadata from .types.user_action_reference import UserActionReference from .types.value import Value from .types.vertex_rag_data import CorpusStatus from .types.vertex_rag_data import FileStatus from .types.vertex_rag_data import ImportRagFilesConfig +from .types.vertex_rag_data import RagChunk from .types.vertex_rag_data import RagCorpus from .types.vertex_rag_data import RagEmbeddingModelConfig +from .types.vertex_rag_data import RagEngineConfig from .types.vertex_rag_data import RagFile from .types.vertex_rag_data import RagFileChunkingConfig +from .types.vertex_rag_data import RagFileParsingConfig from .types.vertex_rag_data import RagFileTransformationConfig +from .types.vertex_rag_data import RagManagedDbConfig from .types.vertex_rag_data import RagVectorDbConfig from .types.vertex_rag_data import UploadRagFileConfig +from .types.vertex_rag_data import VertexAiSearchConfig from .types.vertex_rag_data_service import CreateRagCorpusOperationMetadata from .types.vertex_rag_data_service import CreateRagCorpusRequest from .types.vertex_rag_data_service import DeleteRagCorpusRequest from .types.vertex_rag_data_service import DeleteRagFileRequest from .types.vertex_rag_data_service import GetRagCorpusRequest +from .types.vertex_rag_data_service import GetRagEngineConfigRequest from .types.vertex_rag_data_service import GetRagFileRequest from .types.vertex_rag_data_service import ImportRagFilesOperationMetadata from .types.vertex_rag_data_service import ImportRagFilesRequest @@ -949,8 +1096,15 @@ from .types.vertex_rag_data_service import ListRagFilesResponse from .types.vertex_rag_data_service import UpdateRagCorpusOperationMetadata from 
.types.vertex_rag_data_service import UpdateRagCorpusRequest +from .types.vertex_rag_data_service import UpdateRagEngineConfigOperationMetadata +from .types.vertex_rag_data_service import UpdateRagEngineConfigRequest from .types.vertex_rag_data_service import UploadRagFileRequest from .types.vertex_rag_data_service import UploadRagFileResponse +from .types.vertex_rag_service import AskContextsRequest +from .types.vertex_rag_service import AskContextsResponse +from .types.vertex_rag_service import AsyncRetrieveContextsOperationMetadata +from .types.vertex_rag_service import AsyncRetrieveContextsRequest +from .types.vertex_rag_service import AsyncRetrieveContextsResponse from .types.vertex_rag_service import AugmentPromptRequest from .types.vertex_rag_service import AugmentPromptResponse from .types.vertex_rag_service import Claim @@ -984,7 +1138,102 @@ from .types.vizier_service import SuggestTrialsRequest from .types.vizier_service import SuggestTrialsResponse +if hasattr(api_core, "check_python_version") and hasattr( + api_core, "check_dependency_versions" +): # pragma: NO COVER + api_core.check_python_version("google.cloud.aiplatform_v1") # type: ignore + api_core.check_dependency_versions("google.cloud.aiplatform_v1") # type: ignore +else: # pragma: NO COVER + # An older version of api_core is installed which does not define the + # functions above. We do equivalent checks manually. + try: + import warnings + import sys + + _py_version_str = sys.version.split()[0] + _package_label = "google.cloud.aiplatform_v1" + if sys.version_info < (3, 9): + warnings.warn( + "You are using a non-supported Python version " + + f"({_py_version_str}). Google will not post any further " + + f"updates to {_package_label} supporting this Python version. 
" + + "Please upgrade to the latest Python version, or at " + + f"least to Python 3.9, and then update {_package_label}.", + FutureWarning, + ) + if sys.version_info[:2] == (3, 9): + warnings.warn( + f"You are using a Python version ({_py_version_str}) " + + f"which Google will stop supporting in {_package_label} in " + + "January 2026. Please " + + "upgrade to the latest Python version, or at " + + "least to Python 3.10, before then, and " + + f"then update {_package_label}.", + FutureWarning, + ) + + def parse_version_to_tuple(version_string: str): + """Safely converts a semantic version string to a comparable tuple of integers. + Example: "4.25.8" -> (4, 25, 8) + Ignores non-numeric parts and handles common version formats. + Args: + version_string: Version string in the format "x.y.z" or "x.y.z" + Returns: + Tuple of integers for the parsed version string. + """ + parts = [] + for part in version_string.split("."): + try: + parts.append(int(part)) + except ValueError: + # If it's a non-numeric part (e.g., '1.0.0b1' -> 'b1'), stop here. + # This is a simplification compared to 'packaging.parse_version', but sufficient + # for comparing strictly numeric semantic versions. 
+ break + return tuple(parts) + + def _get_version(dependency_name): + try: + version_string: str = metadata.version(dependency_name) + parsed_version = parse_version_to_tuple(version_string) + return (parsed_version, version_string) + except Exception: + # Catch exceptions from metadata.version() (e.g., PackageNotFoundError) + # or errors during parse_version_to_tuple + return (None, "--") + + _dependency_package = "google.protobuf" + _next_supported_version = "4.25.8" + _next_supported_version_tuple = (4, 25, 8) + _recommendation = " (we recommend 6.x)" + (_version_used, _version_used_string) = _get_version(_dependency_package) + if _version_used and _version_used < _next_supported_version_tuple: + warnings.warn( + f"Package {_package_label} depends on " + + f"{_dependency_package}, currently installed at version " + + f"{_version_used_string}. Future updates to " + + f"{_package_label} will require {_dependency_package} at " + + f"version {_next_supported_version} or higher{_recommendation}." + + " Please ensure " + + "that either (a) your Python environment doesn't pin the " + + f"version of {_dependency_package}, so that updates to " + + f"{_package_label} can require the higher version, or " + + "(b) you manually update your Python environment to use at " + + f"least version {_next_supported_version} of " + + f"{_dependency_package}.", + FutureWarning, + ) + except Exception: + warnings.warn( + "Could not determine the version of Python " + + "currently being used. 
To continue receiving " + + "updates for {_package_label}, ensure you are " + + "using a supported version of Python; see " + + "https://devguide.python.org/versions/" + ) + __all__ = ( + "DataFoundryServiceAsyncClient", "DatasetServiceAsyncClient", "DeploymentResourcePoolServiceAsyncClient", "EndpointServiceAsyncClient", @@ -994,6 +1243,7 @@ "FeatureRegistryServiceAsyncClient", "FeaturestoreOnlineServingServiceAsyncClient", "FeaturestoreServiceAsyncClient", + "GenAiCacheServiceAsyncClient", "GenAiTuningServiceAsyncClient", "IndexEndpointServiceAsyncClient", "IndexServiceAsyncClient", @@ -1008,7 +1258,10 @@ "PersistentResourceServiceAsyncClient", "PipelineServiceAsyncClient", "PredictionServiceAsyncClient", + "ReasoningEngineExecutionServiceAsyncClient", + "ReasoningEngineServiceAsyncClient", "ScheduleServiceAsyncClient", + "SessionServiceAsyncClient", "SpecialistPoolServiceAsyncClient", "TensorboardServiceAsyncClient", "VertexRagDataServiceAsyncClient", @@ -1023,16 +1276,29 @@ "AddExecutionEventsRequest", "AddExecutionEventsResponse", "AddTrialMeasurementRequest", + "AggregationOutput", + "AggregationResult", "Annotation", "AnnotationSpec", "ApiAuth", + "AppendEventRequest", + "AppendEventResponse", "Artifact", + "AskContextsRequest", + "AskContextsResponse", "AssignNotebookRuntimeOperationMetadata", "AssignNotebookRuntimeRequest", + "AsyncQueryReasoningEngineOperationMetadata", + "AsyncQueryReasoningEngineRequest", + "AsyncQueryReasoningEngineResponse", + "AsyncRetrieveContextsOperationMetadata", + "AsyncRetrieveContextsRequest", + "AsyncRetrieveContextsResponse", "Attribution", "AugmentPromptRequest", "AugmentPromptResponse", "AutomaticResources", + "AutoraterConfig", "AutoscalingMetricSpec", "AvroSource", "BatchCancelPipelineJobsOperationMetadata", @@ -1071,6 +1337,7 @@ "Blob", "BlurBaselineConfig", "BoolArray", + "CachedContent", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", "CancelDataLabelingJobRequest", @@ -1083,25 +1350,30 @@ 
"CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", "CheckTrialEarlyStoppingStateResponse", + "Checkpoint", "Citation", "CitationMetadata", "Claim", "ClientConnectionConfig", + "CodeExecutionResult", "CoherenceInput", "CoherenceInstance", "CoherenceResult", "CoherenceSpec", + "ColabImage", "CometInput", "CometInstance", "CometResult", "CometSpec", "CompleteTrialRequest", "CompletionStats", + "ComputationBasedMetricSpec", "ComputeTokensRequest", "ComputeTokensResponse", "ContainerRegistryDestination", "ContainerSpec", "Content", + "ContentMap", "Context", "CopyModelOperationMetadata", "CopyModelRequest", @@ -1113,6 +1385,7 @@ "CountTokensResponse", "CreateArtifactRequest", "CreateBatchPredictionJobRequest", + "CreateCachedContentRequest", "CreateContextRequest", "CreateCustomJobRequest", "CreateDataLabelingJobRequest", @@ -1156,8 +1429,12 @@ "CreatePipelineJobRequest", "CreateRagCorpusOperationMetadata", "CreateRagCorpusRequest", + "CreateReasoningEngineOperationMetadata", + "CreateReasoningEngineRequest", "CreateRegistryFeatureOperationMetadata", "CreateScheduleRequest", + "CreateSessionOperationMetadata", + "CreateSessionRequest", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", "CreateStudyRequest", @@ -1173,6 +1450,9 @@ "CsvSource", "CustomJob", "CustomJobSpec", + "CustomOutput", + "CustomOutputFormatConfig", + "DataFoundryServiceClient", "DataItem", "DataItemView", "DataLabelingJob", @@ -1182,6 +1462,7 @@ "DedicatedResources", "DeleteArtifactRequest", "DeleteBatchPredictionJobRequest", + "DeleteCachedContentRequest", "DeleteContextRequest", "DeleteCustomJobRequest", "DeleteDataLabelingJobRequest", @@ -1216,8 +1497,10 @@ "DeletePipelineJobRequest", "DeleteRagCorpusRequest", "DeleteRagFileRequest", + "DeleteReasoningEngineRequest", "DeleteSavedQueryRequest", "DeleteScheduleRequest", + "DeleteSessionRequest", "DeleteSpecialistPoolRequest", "DeleteStudyRequest", "DeleteTensorboardExperimentRequest", @@ -1232,6 
+1515,9 @@ "DeployModelOperationMetadata", "DeployModelRequest", "DeployModelResponse", + "DeployOperationMetadata", + "DeployRequest", + "DeployResponse", "DeployedIndex", "DeployedIndexAuthConfig", "DeployedIndexRef", @@ -1239,6 +1525,7 @@ "DeployedModelRef", "DeploymentResourcePool", "DeploymentResourcePoolServiceClient", + "DeploymentStage", "DestinationFeatureSetting", "DirectPredictRequest", "DirectPredictResponse", @@ -1246,21 +1533,31 @@ "DirectRawPredictResponse", "DirectUploadSource", "DiskSpec", + "DnsPeeringConfig", "DoubleArray", "DynamicRetrievalConfig", + "EmbedContentRequest", + "EmbedContentResponse", "EncryptionSpec", "Endpoint", "EndpointServiceClient", + "EnterpriseWebSearch", "EntityIdSelector", "EntityType", "EnvVar", "ErrorAnalysisAnnotation", + "EvaluateDatasetResponse", + "EvaluateDatasetRun", "EvaluateInstancesRequest", "EvaluateInstancesResponse", "EvaluatedAnnotation", "EvaluatedAnnotationExplanation", + "EvaluationConfig", + "EvaluationDataset", "EvaluationServiceClient", "Event", + "EventActions", + "EventMetadata", "ExactMatchInput", "ExactMatchInstance", "ExactMatchMetricValue", @@ -1269,6 +1566,7 @@ "Examples", "ExamplesOverride", "ExamplesRestrictionsNamespace", + "ExecutableCode", "Execution", "ExplainRequest", "ExplainResponse", @@ -1309,6 +1607,8 @@ "FeatureView", "FeatureViewDataFormat", "FeatureViewDataKey", + "FeatureViewDirectWriteRequest", + "FeatureViewDirectWriteResponse", "FeatureViewSync", "Featurestore", "FeaturestoreMonitoringConfig", @@ -1334,17 +1634,27 @@ "FunctionCallingConfig", "FunctionDeclaration", "FunctionResponse", + "FunctionResponseBlob", + "FunctionResponseFileData", + "FunctionResponsePart", "GcsDestination", "GcsSource", + "GenAiAdvancedFeaturesConfig", + "GenAiCacheServiceClient", "GenAiTuningServiceClient", "GenerateContentRequest", "GenerateContentResponse", + "GenerateFetchAccessTokenRequest", + "GenerateFetchAccessTokenResponse", + "GenerateSyntheticDataRequest", + "GenerateSyntheticDataResponse", 
"GenerationConfig", "GenericOperationMetadata", "GenieSource", "GetAnnotationSpecRequest", "GetArtifactRequest", "GetBatchPredictionJobRequest", + "GetCachedContentRequest", "GetContextRequest", "GetCustomJobRequest", "GetDataLabelingJobRequest", @@ -1378,8 +1688,11 @@ "GetPipelineJobRequest", "GetPublisherModelRequest", "GetRagCorpusRequest", + "GetRagEngineConfigRequest", "GetRagFileRequest", + "GetReasoningEngineRequest", "GetScheduleRequest", + "GetSessionRequest", "GetSpecialistPoolRequest", "GetStudyRequest", "GetTensorboardExperimentRequest", @@ -1390,6 +1703,7 @@ "GetTrialRequest", "GetTuningJobRequest", "GoogleDriveSource", + "GoogleMaps", "GoogleSearchRetrieval", "GroundednessInput", "GroundednessInstance", @@ -1401,6 +1715,7 @@ "HarmCategory", "HyperparameterTuningJob", "IdMatcher", + "ImageConfig", "ImportDataConfig", "ImportDataOperationMetadata", "ImportDataRequest", @@ -1426,6 +1741,7 @@ "JiraSource", "JobServiceClient", "JobState", + "LLMBasedMetricSpec", "LargeModelReference", "LineageSubgraph", "ListAnnotationsRequest", @@ -1434,6 +1750,8 @@ "ListArtifactsResponse", "ListBatchPredictionJobsRequest", "ListBatchPredictionJobsResponse", + "ListCachedContentsRequest", + "ListCachedContentsResponse", "ListContextsRequest", "ListContextsResponse", "ListCustomJobsRequest", @@ -1452,6 +1770,8 @@ "ListEndpointsResponse", "ListEntityTypesRequest", "ListEntityTypesResponse", + "ListEventsRequest", + "ListEventsResponse", "ListExecutionsRequest", "ListExecutionsResponse", "ListFeatureGroupsRequest", @@ -1482,6 +1802,8 @@ "ListModelEvaluationSlicesResponse", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", + "ListModelVersionCheckpointsRequest", + "ListModelVersionCheckpointsResponse", "ListModelVersionsRequest", "ListModelVersionsResponse", "ListModelsRequest", @@ -1506,10 +1828,14 @@ "ListRagCorporaResponse", "ListRagFilesRequest", "ListRagFilesResponse", + "ListReasoningEnginesRequest", + "ListReasoningEnginesResponse", 
"ListSavedQueriesRequest", "ListSavedQueriesResponse", "ListSchedulesRequest", "ListSchedulesResponse", + "ListSessionsRequest", + "ListSessionsResponse", "ListSpecialistPoolsRequest", "ListSpecialistPoolsResponse", "ListStudiesRequest", @@ -1531,6 +1857,7 @@ "LlmUtilityServiceClient", "LogprobsResult", "LookupStudyRequest", + "LustreMount", "MachineSpec", "ManualBatchTuningParameters", "MatchServiceClient", @@ -1539,6 +1866,8 @@ "MetadataSchema", "MetadataServiceClient", "MetadataStore", + "Metric", + "MetricResult", "MetricxInput", "MetricxInstance", "MetricxResult", @@ -1547,7 +1876,10 @@ "MigrateResourceRequest", "MigrateResourceResponse", "MigrationServiceClient", + "Modality", + "ModalityTokenCount", "Model", + "ModelArmorConfig", "ModelContainerSpec", "ModelDeploymentMonitoringBigQueryTable", "ModelDeploymentMonitoringJob", @@ -1564,6 +1896,8 @@ "ModelMonitoringStatsAnomalies", "ModelServiceClient", "ModelSourceInfo", + "ModelVersionCheckpoint", + "MultiSpeakerVoiceConfig", "MutateDeployedIndexOperationMetadata", "MutateDeployedIndexRequest", "MutateDeployedIndexResponse", @@ -1590,7 +1924,12 @@ "NotebookRuntimeTemplateRef", "NotebookRuntimeType", "NotebookServiceClient", + "NotebookSoftwareConfig", + "OutputConfig", + "OutputFieldSpec", + "OutputInfo", "PSCAutomationConfig", + "PSCAutomationState", "PairwiseChoice", "PairwiseMetricInput", "PairwiseMetricInstance", @@ -1605,6 +1944,7 @@ "PairwiseSummarizationQualityResult", "PairwiseSummarizationQualitySpec", "Part", + "PartialArg", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", "PersistentDiskSpec", @@ -1623,6 +1963,10 @@ "PointwiseMetricResult", "PointwiseMetricSpec", "Port", + "PostStartupScriptConfig", + "PreTunedModel", + "PrebuiltVoiceConfig", + "PredefinedMetricSpec", "PredefinedSplit", "PredictRequest", "PredictRequestResponseLoggingConfig", @@ -1634,6 +1978,7 @@ "PrivateServiceConnectConfig", "Probe", "PscAutomatedEndpoints", + "PscInterfaceConfig", "PublisherModel", 
"PublisherModelView", "PurgeArtifactsMetadata", @@ -1651,6 +1996,8 @@ "QueryDeployedModelsRequest", "QueryDeployedModelsResponse", "QueryExecutionInputsAndOutputsRequest", + "QueryReasoningEngineRequest", + "QueryReasoningEngineResponse", "QuestionAnsweringCorrectnessInput", "QuestionAnsweringCorrectnessInstance", "QuestionAnsweringCorrectnessResult", @@ -1667,15 +2014,20 @@ "QuestionAnsweringRelevanceInstance", "QuestionAnsweringRelevanceResult", "QuestionAnsweringRelevanceSpec", + "RagChunk", "RagContexts", "RagCorpus", "RagEmbeddingModelConfig", + "RagEngineConfig", "RagFile", "RagFileChunkingConfig", + "RagFileParsingConfig", "RagFileTransformationConfig", + "RagManagedDbConfig", "RagQuery", "RagRetrievalConfig", "RagVectorDbConfig", + "RawOutput", "RawPredictRequest", "RayLogsSpec", "RayMetricSpec", @@ -1692,6 +2044,10 @@ "ReadTensorboardTimeSeriesDataResponse", "ReadTensorboardUsageRequest", "ReadTensorboardUsageResponse", + "ReasoningEngine", + "ReasoningEngineExecutionServiceClient", + "ReasoningEngineServiceClient", + "ReasoningEngineSpec", "RebaseTunedModelOperationMetadata", "RebaseTunedModelRequest", "RebootPersistentResourceOperationMetadata", @@ -1700,6 +2056,7 @@ "RemoveContextChildrenResponse", "RemoveDatapointsRequest", "RemoveDatapointsResponse", + "ReplicatedVoiceConfig", "ReservationAffinity", "ResourcePool", "ResourceRuntime", @@ -1710,6 +2067,7 @@ "ResumeModelDeploymentMonitoringJobRequest", "ResumeScheduleRequest", "Retrieval", + "RetrievalConfig", "RetrievalMetadata", "RetrieveContextsRequest", "RetrieveContextsResponse", @@ -1744,14 +2102,22 @@ "SearchModelDeploymentMonitoringStatsAnomaliesResponse", "SearchNearestEntitiesRequest", "SearchNearestEntitiesResponse", + "SecretEnvVar", + "SecretRef", "Segment", "ServiceAccountSpec", + "Session", + "SessionEvent", + "SessionServiceClient", "SharePointSources", "ShieldedVmConfig", "SlackSource", "SmoothGradConfig", + "SpeakerVoiceConfig", "SpecialistPool", "SpecialistPoolServiceClient", + 
"SpeculativeDecodingSpec", + "SpeechConfig", "StartNotebookRuntimeOperationMetadata", "StartNotebookRuntimeRequest", "StartNotebookRuntimeResponse", @@ -1764,6 +2130,7 @@ "StreamDirectPredictResponse", "StreamDirectRawPredictRequest", "StreamDirectRawPredictResponse", + "StreamQueryReasoningEngineRequest", "StreamRawPredictRequest", "StreamingPredictRequest", "StreamingPredictResponse", @@ -1797,7 +2164,10 @@ "SupervisedTuningSpec", "SyncFeatureViewRequest", "SyncFeatureViewResponse", + "SyntheticExample", + "SyntheticField", "TFRecordDestination", + "TaskDescriptionStrategy", "Tensor", "Tensorboard", "TensorboardBlob", @@ -1836,9 +2206,11 @@ "ToolParameterKeyMatchSpec", "TrainingConfig", "TrainingPipeline", + "Transcription", "Trial", "TrialContext", "TunedModel", + "TunedModelCheckpoint", "TunedModelRef", "TuningDataStats", "TuningJob", @@ -1851,6 +2223,7 @@ "UndeployModelResponse", "UnmanagedContainerModel", "UpdateArtifactRequest", + "UpdateCachedContentRequest", "UpdateContextRequest", "UpdateDatasetRequest", "UpdateDatasetVersionRequest", @@ -1885,7 +2258,12 @@ "UpdatePersistentResourceRequest", "UpdateRagCorpusOperationMetadata", "UpdateRagCorpusRequest", + "UpdateRagEngineConfigOperationMetadata", + "UpdateRagEngineConfigRequest", + "UpdateReasoningEngineOperationMetadata", + "UpdateReasoningEngineRequest", "UpdateScheduleRequest", + "UpdateSessionRequest", "UpdateSpecialistPoolOperationMetadata", "UpdateSpecialistPoolRequest", "UpdateTensorboardExperimentRequest", @@ -1904,14 +2282,22 @@ "UploadRagFileResponse", "UpsertDatapointsRequest", "UpsertDatapointsResponse", + "UrlContext", + "UrlContextMetadata", + "UrlMetadata", + "UsageMetadata", "UserActionReference", "Value", "VertexAISearch", + "VertexAiSearchConfig", + "VertexMultimodalDatasetDestination", + "VertexMultimodalDatasetSource", "VertexRagDataServiceClient", "VertexRagServiceClient", "VertexRagStore", "VideoMetadata", "VizierServiceClient", + "VoiceConfig", "WorkerPoolSpec", 
"WriteFeatureValuesPayload", "WriteFeatureValuesRequest", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 4b25e27e52..7aeb9b5405 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -5,6 +5,40 @@ "protoPackage": "google.cloud.aiplatform.v1", "schema": "1.0", "services": { + "DataFoundryService": { + "clients": { + "grpc": { + "libraryClient": "DataFoundryServiceClient", + "rpcs": { + "GenerateSyntheticData": { + "methods": [ + "generate_synthetic_data" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DataFoundryServiceAsyncClient", + "rpcs": { + "GenerateSyntheticData": { + "methods": [ + "generate_synthetic_data" + ] + } + } + }, + "rest": { + "libraryClient": "DataFoundryServiceClient", + "rpcs": { + "GenerateSyntheticData": { + "methods": [ + "generate_synthetic_data" + ] + } + } + } + } + }, "DatasetService": { "clients": { "grpc": { @@ -825,11 +859,21 @@ "grpc": { "libraryClient": "FeatureOnlineStoreServiceClient", "rpcs": { + "FeatureViewDirectWrite": { + "methods": [ + "feature_view_direct_write" + ] + }, "FetchFeatureValues": { "methods": [ "fetch_feature_values" ] }, + "GenerateFetchAccessToken": { + "methods": [ + "generate_fetch_access_token" + ] + }, "SearchNearestEntities": { "methods": [ "search_nearest_entities" @@ -840,11 +884,21 @@ "grpc-async": { "libraryClient": "FeatureOnlineStoreServiceAsyncClient", "rpcs": { + "FeatureViewDirectWrite": { + "methods": [ + "feature_view_direct_write" + ] + }, "FetchFeatureValues": { "methods": [ "fetch_feature_values" ] }, + "GenerateFetchAccessToken": { + "methods": [ + "generate_fetch_access_token" + ] + }, "SearchNearestEntities": { "methods": [ "search_nearest_entities" @@ -855,11 +909,21 @@ "rest": { "libraryClient": "FeatureOnlineStoreServiceClient", "rpcs": { + "FeatureViewDirectWrite": { + "methods": [ + "feature_view_direct_write" + ] + }, "FetchFeatureValues": { 
"methods": [ "fetch_feature_values" ] }, + "GenerateFetchAccessToken": { + "methods": [ + "generate_fetch_access_token" + ] + }, "SearchNearestEntities": { "methods": [ "search_nearest_entities" @@ -1451,6 +1515,100 @@ } } }, + "GenAiCacheService": { + "clients": { + "grpc": { + "libraryClient": "GenAiCacheServiceClient", + "rpcs": { + "CreateCachedContent": { + "methods": [ + "create_cached_content" + ] + }, + "DeleteCachedContent": { + "methods": [ + "delete_cached_content" + ] + }, + "GetCachedContent": { + "methods": [ + "get_cached_content" + ] + }, + "ListCachedContents": { + "methods": [ + "list_cached_contents" + ] + }, + "UpdateCachedContent": { + "methods": [ + "update_cached_content" + ] + } + } + }, + "grpc-async": { + "libraryClient": "GenAiCacheServiceAsyncClient", + "rpcs": { + "CreateCachedContent": { + "methods": [ + "create_cached_content" + ] + }, + "DeleteCachedContent": { + "methods": [ + "delete_cached_content" + ] + }, + "GetCachedContent": { + "methods": [ + "get_cached_content" + ] + }, + "ListCachedContents": { + "methods": [ + "list_cached_contents" + ] + }, + "UpdateCachedContent": { + "methods": [ + "update_cached_content" + ] + } + } + }, + "rest": { + "libraryClient": "GenAiCacheServiceClient", + "rpcs": { + "CreateCachedContent": { + "methods": [ + "create_cached_content" + ] + }, + "DeleteCachedContent": { + "methods": [ + "delete_cached_content" + ] + }, + "GetCachedContent": { + "methods": [ + "get_cached_content" + ] + }, + "ListCachedContents": { + "methods": [ + "list_cached_contents" + ] + }, + "UpdateCachedContent": { + "methods": [ + "update_cached_content" + ] + } + } + } + } + }, "GenAiTuningService": { "clients": { "grpc": { @@ -3003,6 +3161,11 @@ "grpc": { "libraryClient": "ModelGardenServiceClient", "rpcs": { + "Deploy": { + "methods": [ + "deploy" + ] + }, "GetPublisherModel": { "methods": [ "get_publisher_model" @@ -3013,6 +3176,11 @@ "grpc-async": { "libraryClient": "ModelGardenServiceAsyncClient", "rpcs": { + 
"Deploy": { + "methods": [ + "deploy" + ] + }, "GetPublisherModel": { "methods": [ "get_publisher_model" @@ -3023,6 +3191,11 @@ "rest": { "libraryClient": "ModelGardenServiceClient", "rpcs": { + "Deploy": { + "methods": [ + "deploy" + ] + }, "GetPublisherModel": { "methods": [ "get_publisher_model" @@ -3097,6 +3270,11 @@ "list_model_evaluations" ] }, + "ListModelVersionCheckpoints": { + "methods": [ + "list_model_version_checkpoints" + ] + }, "ListModelVersions": { "methods": [ "list_model_versions" @@ -3192,6 +3370,11 @@ "list_model_evaluations" ] }, + "ListModelVersionCheckpoints": { + "methods": [ + "list_model_version_checkpoints" + ] + }, "ListModelVersions": { "methods": [ "list_model_versions" @@ -3287,6 +3470,11 @@ "list_model_evaluations" ] }, + "ListModelVersionCheckpoints": { + "methods": [ + "list_model_version_checkpoints" + ] + }, "ListModelVersions": { "methods": [ "list_model_versions" @@ -3903,6 +4091,11 @@ "direct_raw_predict" ] }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, "Explain": { "methods": [ "explain" @@ -3973,6 +4166,11 @@ "direct_raw_predict" ] }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, "Explain": { "methods": [ "explain" @@ -4043,6 +4241,11 @@ "direct_raw_predict" ] }, + "EmbedContent": { + "methods": [ + "embed_content" + ] + }, "Explain": { "methods": [ "explain" @@ -4102,6 +4305,164 @@ } } }, + "ReasoningEngineExecutionService": { + "clients": { + "grpc": { + "libraryClient": "ReasoningEngineExecutionServiceClient", + "rpcs": { + "AsyncQueryReasoningEngine": { + "methods": [ + "async_query_reasoning_engine" + ] + }, + "QueryReasoningEngine": { + "methods": [ + "query_reasoning_engine" + ] + }, + "StreamQueryReasoningEngine": { + "methods": [ + "stream_query_reasoning_engine" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ReasoningEngineExecutionServiceAsyncClient", + "rpcs": { + "AsyncQueryReasoningEngine": { + "methods": [ + "async_query_reasoning_engine" + ] + }, + 
"QueryReasoningEngine": { + "methods": [ + "query_reasoning_engine" + ] + }, + "StreamQueryReasoningEngine": { + "methods": [ + "stream_query_reasoning_engine" + ] + } + } + }, + "rest": { + "libraryClient": "ReasoningEngineExecutionServiceClient", + "rpcs": { + "AsyncQueryReasoningEngine": { + "methods": [ + "async_query_reasoning_engine" + ] + }, + "QueryReasoningEngine": { + "methods": [ + "query_reasoning_engine" + ] + }, + "StreamQueryReasoningEngine": { + "methods": [ + "stream_query_reasoning_engine" + ] + } + } + } + } + }, + "ReasoningEngineService": { + "clients": { + "grpc": { + "libraryClient": "ReasoningEngineServiceClient", + "rpcs": { + "CreateReasoningEngine": { + "methods": [ + "create_reasoning_engine" + ] + }, + "DeleteReasoningEngine": { + "methods": [ + "delete_reasoning_engine" + ] + }, + "GetReasoningEngine": { + "methods": [ + "get_reasoning_engine" + ] + }, + "ListReasoningEngines": { + "methods": [ + "list_reasoning_engines" + ] + }, + "UpdateReasoningEngine": { + "methods": [ + "update_reasoning_engine" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ReasoningEngineServiceAsyncClient", + "rpcs": { + "CreateReasoningEngine": { + "methods": [ + "create_reasoning_engine" + ] + }, + "DeleteReasoningEngine": { + "methods": [ + "delete_reasoning_engine" + ] + }, + "GetReasoningEngine": { + "methods": [ + "get_reasoning_engine" + ] + }, + "ListReasoningEngines": { + "methods": [ + "list_reasoning_engines" + ] + }, + "UpdateReasoningEngine": { + "methods": [ + "update_reasoning_engine" + ] + } + } + }, + "rest": { + "libraryClient": "ReasoningEngineServiceClient", + "rpcs": { + "CreateReasoningEngine": { + "methods": [ + "create_reasoning_engine" + ] + }, + "DeleteReasoningEngine": { + "methods": [ + "delete_reasoning_engine" + ] + }, + "GetReasoningEngine": { + "methods": [ + "get_reasoning_engine" + ] + }, + "ListReasoningEngines": { + "methods": [ + "list_reasoning_engines" + ] + }, + "UpdateReasoningEngine": { + "methods": [ + 
"update_reasoning_engine" + ] + } + } + } + } + }, "ScheduleService": { "clients": { "grpc": { @@ -4226,6 +4587,130 @@ } } }, + "SessionService": { + "clients": { + "grpc": { + "libraryClient": "SessionServiceClient", + "rpcs": { + "AppendEvent": { + "methods": [ + "append_event" + ] + }, + "CreateSession": { + "methods": [ + "create_session" + ] + }, + "DeleteSession": { + "methods": [ + "delete_session" + ] + }, + "GetSession": { + "methods": [ + "get_session" + ] + }, + "ListEvents": { + "methods": [ + "list_events" + ] + }, + "ListSessions": { + "methods": [ + "list_sessions" + ] + }, + "UpdateSession": { + "methods": [ + "update_session" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SessionServiceAsyncClient", + "rpcs": { + "AppendEvent": { + "methods": [ + "append_event" + ] + }, + "CreateSession": { + "methods": [ + "create_session" + ] + }, + "DeleteSession": { + "methods": [ + "delete_session" + ] + }, + "GetSession": { + "methods": [ + "get_session" + ] + }, + "ListEvents": { + "methods": [ + "list_events" + ] + }, + "ListSessions": { + "methods": [ + "list_sessions" + ] + }, + "UpdateSession": { + "methods": [ + "update_session" + ] + } + } + }, + "rest": { + "libraryClient": "SessionServiceClient", + "rpcs": { + "AppendEvent": { + "methods": [ + "append_event" + ] + }, + "CreateSession": { + "methods": [ + "create_session" + ] + }, + "DeleteSession": { + "methods": [ + "delete_session" + ] + }, + "GetSession": { + "methods": [ + "get_session" + ] + }, + "ListEvents": { + "methods": [ + "list_events" + ] + }, + "ListSessions": { + "methods": [ + "list_sessions" + ] + }, + "UpdateSession": { + "methods": [ + "update_session" + ] + } + } + } + } + }, "SpecialistPoolService": { "clients": { "grpc": { @@ -4814,6 +5299,11 @@ "get_rag_corpus" ] }, + "GetRagEngineConfig": { + "methods": [ + "get_rag_engine_config" + ] + }, "GetRagFile": { "methods": [ "get_rag_file" @@ -4839,6 +5329,11 @@ "update_rag_corpus" ] }, + "UpdateRagEngineConfig": { + 
"methods": [ + "update_rag_engine_config" + ] + }, "UploadRagFile": { "methods": [ "upload_rag_file" @@ -4869,6 +5364,11 @@ "get_rag_corpus" ] }, + "GetRagEngineConfig": { + "methods": [ + "get_rag_engine_config" + ] + }, "GetRagFile": { "methods": [ "get_rag_file" @@ -4894,6 +5394,11 @@ "update_rag_corpus" ] }, + "UpdateRagEngineConfig": { + "methods": [ + "update_rag_engine_config" + ] + }, "UploadRagFile": { "methods": [ "upload_rag_file" @@ -4924,6 +5429,11 @@ "get_rag_corpus" ] }, + "GetRagEngineConfig": { + "methods": [ + "get_rag_engine_config" + ] + }, "GetRagFile": { "methods": [ "get_rag_file" @@ -4949,6 +5459,11 @@ "update_rag_corpus" ] }, + "UpdateRagEngineConfig": { + "methods": [ + "update_rag_engine_config" + ] + }, "UploadRagFile": { "methods": [ "upload_rag_file" @@ -4963,6 +5478,16 @@ "grpc": { "libraryClient": "VertexRagServiceClient", "rpcs": { + "AskContexts": { + "methods": [ + "ask_contexts" + ] + }, + "AsyncRetrieveContexts": { + "methods": [ + "async_retrieve_contexts" + ] + }, "AugmentPrompt": { "methods": [ "augment_prompt" @@ -4983,6 +5508,16 @@ "grpc-async": { "libraryClient": "VertexRagServiceAsyncClient", "rpcs": { + "AskContexts": { + "methods": [ + "ask_contexts" + ] + }, + "AsyncRetrieveContexts": { + "methods": [ + "async_retrieve_contexts" + ] + }, "AugmentPrompt": { "methods": [ "augment_prompt" @@ -5003,6 +5538,16 @@ "rest": { "libraryClient": "VertexRagServiceClient", "rpcs": { + "AskContexts": { + "methods": [ + "ask_contexts" + ] + }, + "AsyncRetrieveContexts": { + "methods": [ + "async_retrieve_contexts" + ] + }, "AugmentPrompt": { "methods": [ "augment_prompt" diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index 2f53073eb7..9359a7eded 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.74.0" # {x-release-please-version} +__version__ = "1.148.1" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1/services/__init__.py b/google/cloud/aiplatform_v1/services/__init__.py index 8f6cf06824..cbf94b283c 100644 --- a/google/cloud/aiplatform_v1/services/__init__.py +++ b/google/cloud/aiplatform_v1/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/__init__.py b/google/cloud/aiplatform_v1/services/data_foundry_service/__init__.py new file mode 100644 index 0000000000..a2826e0883 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import DataFoundryServiceClient +from .async_client import DataFoundryServiceAsyncClient + +__all__ = ( + "DataFoundryServiceClient", + "DataFoundryServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py b/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py new file mode 100644 index 0000000000..54c5d7dd22 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/async_client.py @@ -0,0 +1,1115 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import logging as std_logging +from collections import OrderedDict +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.types import data_foundry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import DataFoundryServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import DataFoundryServiceGrpcAsyncIOTransport +from .client import DataFoundryServiceClient + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class DataFoundryServiceAsyncClient: + """Service for generating and preparing datasets for Gen AI + evaluation. + """ + + _client: DataFoundryServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = DataFoundryServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DataFoundryServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = DataFoundryServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = DataFoundryServiceClient._DEFAULT_UNIVERSE + + common_billing_account_path = staticmethod( + DataFoundryServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + DataFoundryServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(DataFoundryServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + DataFoundryServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + DataFoundryServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + DataFoundryServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(DataFoundryServiceClient.common_project_path) + parse_common_project_path = staticmethod( + DataFoundryServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(DataFoundryServiceClient.common_location_path) + parse_common_location_path = staticmethod( + DataFoundryServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DataFoundryServiceAsyncClient: The constructed client. 
+ """ + sa_info_func = ( + DataFoundryServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(DataFoundryServiceAsyncClient, info, *args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DataFoundryServiceAsyncClient: The constructed client. + """ + sa_file_func = ( + DataFoundryServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(DataFoundryServiceAsyncClient, filename, *args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. 
+ + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return DataFoundryServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> DataFoundryServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DataFoundryServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = DataFoundryServiceClient.get_transport_class + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + DataFoundryServiceTransport, + Callable[..., DataFoundryServiceTransport], + ] + ] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the data foundry service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Optional[Union[str,DataFoundryServiceTransport,Callable[..., DataFoundryServiceTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the DataFoundryServiceTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = DataFoundryServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient`.", + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "credentialsType": None, + } + ), + ) + + async def generate_synthetic_data( + self, + request: Optional[ + Union[data_foundry_service.GenerateSyntheticDataRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> data_foundry_service.GenerateSyntheticDataResponse: + r"""Generates synthetic data based on the provided + configuration. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_generate_synthetic_data(): + # Create a client + client = aiplatform_v1.DataFoundryServiceAsyncClient() + + # Initialize request argument(s) + task_description = aiplatform_v1.TaskDescriptionStrategy() + task_description.task_description = "task_description_value" + + output_field_specs = aiplatform_v1.OutputFieldSpec() + output_field_specs.field_name = "field_name_value" + + request = aiplatform_v1.GenerateSyntheticDataRequest( + task_description=task_description, + location="location_value", + count=553, + output_field_specs=output_field_specs, + ) + + # Make the request + response = await client.generate_synthetic_data(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GenerateSyntheticDataRequest, dict]]): + The request object. Request message for + DataFoundryService.GenerateSyntheticData. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.GenerateSyntheticDataResponse: + The response containing the generated + data. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
        # Coerce a dict (or compatible object) into the proto-plus request
        # type. There are no flattened fields, so the caller's input is
        # never mutated.
        if not isinstance(request, data_foundry_service.GenerateSyntheticDataRequest):
            request = data_foundry_service.GenerateSyntheticDataRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._client._transport._wrapped_methods[
            self._client._transport.generate_synthetic_data
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_operations(
        self,
        request: Optional[operations_pb2.ListOperationsRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Lists operations that match the specified filter in the request.

        Args:
            request (:class:`~.operations_pb2.ListOperationsRequest`):
                The request object. Request message for the ``ListOperations``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.operations_pb2.ListOperationsResponse:
                Response message for the ``ListOperations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.ListOperationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.list_operations]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_operation(
        self,
        request: Optional[operations_pb2.GetOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Gets the latest state of a long-running operation.

        Args:
            request (:class:`~.operations_pb2.GetOperationRequest`):
                The request object. Request message for the ``GetOperation``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.GetOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.get_operation]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def delete_operation(
        self,
        request: Optional[operations_pb2.DeleteOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> None:
        r"""Deletes a long-running operation.

        This method indicates that the client is no longer interested
        in the operation result. It does not cancel the operation.
        If the server doesn't support this method, it returns
        ``google.rpc.Code.UNIMPLEMENTED``.

        Args:
            request (:class:`~.operations_pb2.DeleteOperationRequest`):
                The request object. Request message for the ``DeleteOperation``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.DeleteOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.delete_operation]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.  (No response body; the RPC returns Empty.)
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def cancel_operation(
        self,
        request: Optional[operations_pb2.CancelOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> None:
        r"""Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but success
        is not guaranteed. If the server doesn't support this method, it
        returns ``google.rpc.Code.UNIMPLEMENTED``.

        Args:
            request (:class:`~.operations_pb2.CancelOperationRequest`):
                The request object. Request message for the ``CancelOperation``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            None
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.CancelOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.  (No response body; the RPC returns Empty.)
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

    async def wait_operation(
        self,
        request: Optional[operations_pb2.WaitOperationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Waits until the specified long-running operation is done or reaches
        at most a specified timeout, returning the latest state.

        If the operation is already done, the latest state is immediately
        returned. If the timeout specified is greater than the default
        HTTP/RPC timeout, the HTTP/RPC timeout is used. If the server does
        not support this method, it returns ``google.rpc.Code.UNIMPLEMENTED``.

        Args:
            request (:class:`~.operations_pb2.WaitOperationRequest`):
                The request object. Request message for the ``WaitOperation``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.operations_pb2.Operation:
                An ``Operation`` object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = operations_pb2.WaitOperationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.wait_operation]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def set_iam_policy(
        self,
        request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> policy_pb2.Policy:
        r"""Sets the IAM access control policy on the specified function.

        Replaces any existing policy.

        Args:
            request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`):
                The request object. Request message for the ``SetIamPolicy``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.policy_pb2.Policy:
                Defines an Identity and Access Management (IAM) policy: a
                collection of ``bindings``, each binding one or more
                ``members`` (user accounts, service accounts, groups, domains)
                to a single ``role``, optionally constrained by a
                ``condition`` expression. For a description of IAM and its
                features, see the IAM developer's guide at
                https://cloud.google.com/iam/docs.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.SetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.set_iam_policy]

        # Certain fields should be provided within the metadata header;
        # add these here.  IAM RPCs route on "resource" rather than "name".
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_iam_policy(
        self,
        request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> policy_pb2.Policy:
        r"""Gets the IAM access control policy for a function.

        Returns an empty policy if the function exists and does not have a
        policy set.

        Args:
            request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`):
                The request object. Request message for the ``GetIamPolicy``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.policy_pb2.Policy:
                Defines an Identity and Access Management (IAM) policy: a
                collection of ``bindings``, each binding one or more
                ``members`` (user accounts, service accounts, groups, domains)
                to a single ``role``, optionally constrained by a
                ``condition`` expression. For a description of IAM and its
                features, see the IAM developer's guide at
                https://cloud.google.com/iam/docs.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.GetIamPolicyRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.get_iam_policy]

        # Certain fields should be provided within the metadata header;
        # add these here.  IAM RPCs route on "resource" rather than "name".
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def test_iam_permissions(
        self,
        request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> iam_policy_pb2.TestIamPermissionsResponse:
        r"""Tests the specified IAM permissions against the IAM access control
        policy for a function.

        If the function does not exist, this will return an empty set
        of permissions, not a NOT_FOUND error.

        Args:
            request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`):
                The request object. Request message for the
                ``TestIamPermissions`` method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.iam_policy_pb2.TestIamPermissionsResponse:
                Response message for the ``TestIamPermissions`` method.
        """
        # Create or coerce a protobuf request object.

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.TestIamPermissionsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[
            self._client._transport.test_iam_permissions
        ]

        # Certain fields should be provided within the metadata header;
        # add these here.  IAM RPCs route on "resource" rather than "name".
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def get_location(
        self,
        request: Optional[locations_pb2.GetLocationRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> locations_pb2.Location:
        r"""Gets information about a location.

        Args:
            request (:class:`~.location_pb2.GetLocationRequest`):
                The request object. Request message for the ``GetLocation``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.location_pb2.Location:
                Location object.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.GetLocationRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.get_location]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def list_locations(
        self,
        request: Optional[locations_pb2.ListLocationsRequest] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> locations_pb2.ListLocationsResponse:
        r"""Lists information about the supported locations for this service.

        Args:
            request (:class:`~.location_pb2.ListLocationsRequest`):
                The request object. Request message for the ``ListLocations``
                method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
                sent along with the request as metadata. Each value must be of
                type ``str``, except for keys ending with the suffix ``-bin``,
                whose values must be of type ``bytes``.

        Returns:
            ~.location_pb2.ListLocationsResponse:
                Response message for the ``ListLocations`` method.
        """
        # Create or coerce a protobuf request object.
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = locations_pb2.ListLocationsRequest(**request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self.transport._wrapped_methods[self._client._transport.list_locations]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def __aenter__(self) -> "DataFoundryServiceAsyncClient":
        # Support `async with client:` usage; the transport is closed on exit.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)

# Newer api-core versions report the protobuf runtime version alongside the
# GAPIC version; set it only when the attribute exists.
if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"):  # pragma: NO COVER
    DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__


__all__ = ("DataFoundryServiceAsyncClient",)

# ---------------------------------------------------------------------------
# (patch boundary) Begin new file:
#   google/cloud/aiplatform_v1/services/data_foundry_service/client.py
# ---------------------------------------------------------------------------

# -*- coding: utf-8 -*-
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from http import HTTPStatus
import json
import logging as std_logging
import os
import re
from typing import (
    Dict,
    Callable,
    Mapping,
    MutableMapping,
    MutableSequence,
    Optional,
    Sequence,
    Tuple,
    Type,
    Union,
    cast,
)
import warnings

from google.cloud.aiplatform_v1 import gapic_version as package_version

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore
import google.protobuf

# Older api-core releases lack `gapic_v1.method._MethodDefault`; fall back to
# a plain `object` sentinel in the retry type alias.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore

# Structured client logging is only available in newer api-core releases.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = std_logging.getLogger(__name__)

from google.cloud.aiplatform_v1.types import data_foundry_service
from google.cloud.location import locations_pb2  # type: ignore
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from .transports.base import DataFoundryServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DataFoundryServiceGrpcTransport
from .transports.grpc_asyncio import DataFoundryServiceGrpcAsyncIOTransport
from .transports.rest import DataFoundryServiceRestTransport

# The async REST transport has optional dependencies; remember the import
# error so `get_transport_class("rest_asyncio")` can re-raise it lazily.
try:
    from .transports.rest_asyncio import AsyncDataFoundryServiceRestTransport

    HAS_ASYNC_REST_DEPENDENCIES = True
except ImportError as e:  # pragma: NO COVER
    HAS_ASYNC_REST_DEPENDENCIES = False
    ASYNC_REST_EXCEPTION = e


class DataFoundryServiceClientMeta(type):
    """Metaclass for the DataFoundryService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[DataFoundryServiceTransport]]
    _transport_registry["grpc"] = DataFoundryServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = DataFoundryServiceGrpcAsyncIOTransport
    _transport_registry["rest"] = DataFoundryServiceRestTransport
    if HAS_ASYNC_REST_DEPENDENCIES:  # pragma: NO COVER
        _transport_registry["rest_asyncio"] = AsyncDataFoundryServiceRestTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[DataFoundryServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If the async REST transport was requested but its optional
        # dependencies are missing, surface the original import error.
        if (
            label == "rest_asyncio" and not HAS_ASYNC_REST_DEPENDENCIES
        ):  # pragma: NO COVER
            raise ASYNC_REST_EXCEPTION
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class DataFoundryServiceClient(metaclass=DataFoundryServiceClientMeta):
    """Service for generating and preparing datasets for Gen AI
    evaluation.
    """

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # NOTE(review): the named groups below were stripped to "(?P...)" by a
        # text-mangling pass; restored from the `m.groups()` unpacking that
        # follows — confirm against the generator output.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com domain: leave as-is.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}"
    _DEFAULT_UNIVERSE = "googleapis.com"

    @staticmethod
    def _use_client_cert_effective():
        """Returns whether client certificate should be used for mTLS if the
        google-auth version supports should_use_client_cert automatic mTLS
        enablement.

        Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.

        Returns:
            bool: whether client certificate should be used for mTLS
        Raises:
            ValueError: If using a version of google-auth without
                should_use_client_cert and GOOGLE_API_USE_CLIENT_CERTIFICATE
                is set to an unexpected value.
        """
        # check if google-auth version supports should_use_client_cert for automatic mTLS enablement
        if hasattr(mtls, "should_use_client_cert"):  # pragma: NO COVER
            return mtls.should_use_client_cert()
        else:  # pragma: NO COVER
            # if unsupported, fallback to reading from env var
            use_client_cert_str = os.getenv(
                "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
            ).lower()
            if use_client_cert_str not in ("true", "false"):
                raise ValueError(
                    "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be"
                    " either `true` or `false`"
                )
            return use_client_cert_str == "true"

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            DataFoundryServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            DataFoundryServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    # Alias kept for backwards compatibility with older client surfaces.
    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> DataFoundryServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            DataFoundryServiceTransport: The transport used by the client
                instance.
        """
        return self._transport

    @staticmethod
    def common_billing_account_path(
        billing_account: str,
    ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(
        folder: str,
    ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(
            folder=folder,
        )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(
        organization: str,
    ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(
            organization=organization,
        )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(
        project: str,
    ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(
            project=project,
        )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(
        project: str,
        location: str,
    ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project,
            location=location,
        )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Deprecated. Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` if provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """

        warnings.warn(
            "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
            DeprecationWarning,
        )
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = DataFoundryServiceClient._use_client_cert_effective()
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert:
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source

    @staticmethod
    def _read_environment_variables():
        """Returns the environment variables used by the client.

        Returns:
            Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
            GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.

        Raises:
            ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
                any of ["true", "false"].
            google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
                is not any of ["auto", "never", "always"].
+ """ + use_client_cert = DataFoundryServiceClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. 
+ """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = DataFoundryServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = DataFoundryServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = DataFoundryServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = DataFoundryServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. 
+ return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + DataFoundryServiceTransport, + Callable[..., DataFoundryServiceTransport], + ] + ] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the data foundry service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ transport (Optional[Union[str,DataFoundryServiceTransport,Callable[..., DataFoundryServiceTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the DataFoundryServiceTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DataFoundryServiceClient._read_environment_variables() + ) + self._client_cert_source = DataFoundryServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = DataFoundryServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, DataFoundryServiceTransport) + if transport_provided: + # transport is a DataFoundryServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." 
+ ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(DataFoundryServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or DataFoundryServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + transport_init: Union[ + Type[DataFoundryServiceTransport], + Callable[..., DataFoundryServiceTransport], + ] = ( + DataFoundryServiceClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., DataFoundryServiceTransport], transport) + ) + + if "rest_asyncio" in str(transport_init): + unsupported_params = { + "google.api_core.client_options.ClientOptions.credentials_file": self._client_options.credentials_file, + "google.api_core.client_options.ClientOptions.scopes": self._client_options.scopes, + "google.api_core.client_options.ClientOptions.quota_project_id": self._client_options.quota_project_id, + "google.api_core.client_options.ClientOptions.client_cert_source": self._client_options.client_cert_source, + "google.api_core.client_options.ClientOptions.api_audience": self._client_options.api_audience, + } + provided_unsupported_params = [ + name + for name, value in unsupported_params.items() + if value is not None + ] + if provided_unsupported_params: + raise core_exceptions.AsyncRestUnsupportedParameterError( # type: ignore + f"The following provided parameters are not supported for `transport=rest_asyncio`: {', '.join(provided_unsupported_params)}" + ) + self._transport = transport_init( + credentials=credentials, + host=self._api_endpoint, + client_info=client_info, + ) + return + + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + 
): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + # initialize with the provided callable or the passed in class + self._transport = transport_init( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.aiplatform_v1.DataFoundryServiceClient`.", + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "credentialsType": None, + } + ), + ) + + def generate_synthetic_data( + self, + request: Optional[ + Union[data_foundry_service.GenerateSyntheticDataRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> data_foundry_service.GenerateSyntheticDataResponse: + r"""Generates synthetic data based on the provided + configuration. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_generate_synthetic_data(): + # Create a client + client = aiplatform_v1.DataFoundryServiceClient() + + # Initialize request argument(s) + task_description = aiplatform_v1.TaskDescriptionStrategy() + task_description.task_description = "task_description_value" + + output_field_specs = aiplatform_v1.OutputFieldSpec() + output_field_specs.field_name = "field_name_value" + + request = aiplatform_v1.GenerateSyntheticDataRequest( + task_description=task_description, + location="location_value", + count=553, + output_field_specs=output_field_specs, + ) + + # Make the request + response = client.generate_synthetic_data(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GenerateSyntheticDataRequest, dict]): + The request object. Request message for + DataFoundryService.GenerateSyntheticData. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.GenerateSyntheticDataResponse: + The response containing the generated + data. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, data_foundry_service.GenerateSyntheticDataRequest): + request = data_foundry_service.GenerateSyntheticDataRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_synthetic_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "DataFoundryServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.wait_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+
+                **JSON Example**
+
+                ::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**
+
+                ::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        try:
+            # Send the request.
+            response = rpc(
+                request,
+                retry=retry,
+                timeout=timeout,
+                metadata=metadata,
+            )
+
+            # Done; return the response.
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + +__all__ = ("DataFoundryServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/README.rst b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/README.rst new file mode 100644 index 0000000000..a91d4d2ae3 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`DataFoundryServiceTransport` is the ABC for all transports. +- public child `DataFoundryServiceGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `DataFoundryServiceGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseDataFoundryServiceRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `DataFoundryServiceRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). 
diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/__init__.py new file mode 100644 index 0000000000..6ddbc3a77b --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/__init__.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type, Tuple + +from .base import DataFoundryServiceTransport +from .grpc import DataFoundryServiceGrpcTransport +from .grpc_asyncio import DataFoundryServiceGrpcAsyncIOTransport +from .rest import DataFoundryServiceRestTransport +from .rest import DataFoundryServiceRestInterceptor + +ASYNC_REST_CLASSES: Tuple[str, ...] +try: + from .rest_asyncio import AsyncDataFoundryServiceRestTransport + from .rest_asyncio import AsyncDataFoundryServiceRestInterceptor + + ASYNC_REST_CLASSES = ( + "AsyncDataFoundryServiceRestTransport", + "AsyncDataFoundryServiceRestInterceptor", + ) + HAS_REST_ASYNC = True +except ImportError: # pragma: NO COVER + ASYNC_REST_CLASSES = () + HAS_REST_ASYNC = False + + +# Compile a registry of transports. 
+_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[DataFoundryServiceTransport]] +_transport_registry["grpc"] = DataFoundryServiceGrpcTransport +_transport_registry["grpc_asyncio"] = DataFoundryServiceGrpcAsyncIOTransport +_transport_registry["rest"] = DataFoundryServiceRestTransport +if HAS_REST_ASYNC: # pragma: NO COVER + _transport_registry["rest_asyncio"] = AsyncDataFoundryServiceRestTransport + +__all__ = ( + "DataFoundryServiceTransport", + "DataFoundryServiceGrpcTransport", + "DataFoundryServiceGrpcAsyncIOTransport", + "DataFoundryServiceRestTransport", + "DataFoundryServiceRestInterceptor", +) + ASYNC_REST_CLASSES diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py new file mode 100644 index 0000000000..0f6ab84fbe --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/base.py @@ -0,0 +1,326 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +from google.cloud.aiplatform_v1.types import data_foundry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class DataFoundryServiceTransport(abc.ABC): + """Abstract transport class for DataFoundryService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the scopes. + self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, + ) + elif credentials is None and not self._ignore_credentials: + credentials, _ = google.auth.default( + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, + ) + # Don't apply audience if the credentials file passed from user. 
+ if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.generate_synthetic_data: gapic_v1.method.wrap_method( + self.generate_synthetic_data, + default_timeout=None, + client_info=client_info, + ), + self.get_location: gapic_v1.method.wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: gapic_v1.method.wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: gapic_v1.method.wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + 
default_timeout=None, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: gapic_v1.method.wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def generate_synthetic_data( + self, + ) -> Callable[ + [data_foundry_service.GenerateSyntheticDataRequest], + Union[ + data_foundry_service.GenerateSyntheticDataResponse, + Awaitable[data_foundry_service.GenerateSyntheticDataResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise 
NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("DataFoundryServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/grpc.py new file mode 100644 index 0000000000..5bdf6acf48 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/grpc.py @@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json +import logging as std_logging +import pickle +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message + +import grpc # type: ignore +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import data_foundry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import DataFoundryServiceTransport, DEFAULT_CLIENT_INFO + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + 
"requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class DataFoundryServiceGrpcTransport(DataFoundryServiceTransport): + """gRPC backend transport for DataFoundryService. + + Service for generating and preparing datasets for Gen AI + evaluation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. 
If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, grpc.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def generate_synthetic_data( + self, + ) -> Callable[ + [data_foundry_service.GenerateSyntheticDataRequest], + data_foundry_service.GenerateSyntheticDataResponse, + ]: + r"""Return a callable for the generate synthetic data method over gRPC. + + Generates synthetic data based on the provided + configuration. + + Returns: + Callable[[~.GenerateSyntheticDataRequest], + ~.GenerateSyntheticDataResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_synthetic_data" not in self._stubs: + self._stubs["generate_synthetic_data"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DataFoundryService/GenerateSyntheticData", + request_serializer=data_foundry_service.GenerateSyntheticDataRequest.serialize, + response_deserializer=data_foundry_service.GenerateSyntheticDataResponse.deserialize, + ) + return self._stubs["generate_synthetic_data"] + + def close(self): + self._logged_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("DataFoundryServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..e84f47fa33 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/grpc_asyncio.py @@ -0,0 +1,644 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import inspect +import json +import pickle +import logging as std_logging +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message + +import grpc # type: ignore +import proto # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import data_foundry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import DataFoundryServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import DataFoundryServiceGrpcTransport + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = 
f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `grpc.aio.Metadata` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class DataFoundryServiceGrpcAsyncIOTransport(DataFoundryServiceTransport): + """gRPC AsyncIO backend transport for DataFoundryService. + + Service for generating and preparing datasets for Gen AI + evaluation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it.
+ + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. 
channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, aio.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the 
credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def generate_synthetic_data( + self, + ) -> Callable[ + [data_foundry_service.GenerateSyntheticDataRequest], + Awaitable[data_foundry_service.GenerateSyntheticDataResponse], + ]: + r"""Return a callable for the generate synthetic data method over gRPC. + + Generates synthetic data based on the provided + configuration. + + Returns: + Callable[[~.GenerateSyntheticDataRequest], + Awaitable[~.GenerateSyntheticDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "generate_synthetic_data" not in self._stubs: + self._stubs["generate_synthetic_data"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DataFoundryService/GenerateSyntheticData", + request_serializer=data_foundry_service.GenerateSyntheticDataRequest.serialize, + response_deserializer=data_foundry_service.GenerateSyntheticDataResponse.deserialize, + ) + return self._stubs["generate_synthetic_data"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.generate_synthetic_data: self._wrap_method( + self.generate_synthetic_data, + default_timeout=None, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: self._wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, 
func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + def close(self): + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("DataFoundryServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest.py new file mode 100644 index 0000000000..f3a7fb2a37 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest.py @@ -0,0 +1,2043 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import logging +import json # type: ignore + +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import gapic_v1 +import google.protobuf + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore + +from requests import __version__ as requests_version +import dataclasses +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + + +from google.cloud.aiplatform_v1.types import data_foundry_service +from google.longrunning import operations_pb2 # type: ignore + + +from .rest_base import _BaseDataFoundryServiceRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"requests@{requests_version}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class DataFoundryServiceRestInterceptor: + """Interceptor for DataFoundryService. 
+ + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the DataFoundryServiceRestTransport. + + .. code-block:: python + class MyCustomDataFoundryServiceInterceptor(DataFoundryServiceRestInterceptor): + def pre_generate_synthetic_data(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_synthetic_data(self, response): + logging.log(f"Received response: {response}") + return response + + transport = DataFoundryServiceRestTransport(interceptor=MyCustomDataFoundryServiceInterceptor()) + client = DataFoundryServiceClient(transport=transport) + + + """ + + def pre_generate_synthetic_data( + self, + request: data_foundry_service.GenerateSyntheticDataRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + data_foundry_service.GenerateSyntheticDataRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for generate_synthetic_data + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_generate_synthetic_data( + self, response: data_foundry_service.GenerateSyntheticDataResponse + ) -> data_foundry_service.GenerateSyntheticDataResponse: + """Post-rpc interceptor for generate_synthetic_data + + DEPRECATED. Please use the `post_generate_synthetic_data_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. 
This `post_generate_synthetic_data` interceptor runs + before the `post_generate_synthetic_data_with_metadata` interceptor. + """ + return response + + def post_generate_synthetic_data_with_metadata( + self, + response: data_foundry_service.GenerateSyntheticDataResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + data_foundry_service.GenerateSyntheticDataResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for generate_synthetic_data + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DataFoundryService server but before it is returned to user code. + + We recommend only using this `post_generate_synthetic_data_with_metadata` + interceptor in new development instead of the `post_generate_synthetic_data` interceptor. + When both interceptors are used, this `post_generate_synthetic_data_with_metadata` interceptor runs after the + `post_generate_synthetic_data` interceptor. The (possibly modified) response returned by + `post_generate_synthetic_data` will be passed to + `post_generate_synthetic_data_with_metadata`. + """ + return response, metadata + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. 
+ """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. 
+ """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. 
+ """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class DataFoundryServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: DataFoundryServiceRestInterceptor + + +class DataFoundryServiceRestTransport(_BaseDataFoundryServiceRestTransport): + """REST backend synchronous transport for DataFoundryService. + + Service for generating and preparing datasets for Gen AI + evaluation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[DataFoundryServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or DataFoundryServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _GenerateSyntheticData( + _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.GenerateSyntheticData") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: data_foundry_service.GenerateSyntheticDataRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: 
Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> data_foundry_service.GenerateSyntheticDataResponse: + r"""Call the generate synthetic data method over HTTP. + + Args: + request (~.data_foundry_service.GenerateSyntheticDataRequest): + The request object. Request message for + DataFoundryService.GenerateSyntheticData. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.data_foundry_service.GenerateSyntheticDataResponse: + The response containing the generated + data. + + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_http_options() + ) + + request, metadata = self._interceptor.pre_generate_synthetic_data( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_transcoded_request( + http_options, request + ) + + body = _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending 
request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GenerateSyntheticData", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GenerateSyntheticData", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + DataFoundryServiceRestTransport._GenerateSyntheticData._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = data_foundry_service.GenerateSyntheticDataResponse() + pb_resp = data_foundry_service.GenerateSyntheticDataResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_generate_synthetic_data(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_generate_synthetic_data_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + data_foundry_service.GenerateSyntheticDataResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceClient.generate_synthetic_data", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GenerateSyntheticData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + @property + def generate_synthetic_data( + self, + ) -> Callable[ + [data_foundry_service.GenerateSyntheticDataRequest], + 
data_foundry_service.GenerateSyntheticDataResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateSyntheticData(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation( + _BaseDataFoundryServiceRestTransport._BaseGetLocation, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.GetLocation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + locations_pb2.Location: Response from GetLocation method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGetLocation._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_location(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseDataFoundryServiceRestTransport._BaseListLocations, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.ListLocations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + 
r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseListLocations._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._ListLocations._get_response( + 
self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy( + _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: 
iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": 
"GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy( + _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + 
timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. 
+ """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_http_options() + ) + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) + + body = _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "SetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions( + _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> 
iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + 
}, + ) + + # Send the request + response = ( + DataFoundryServiceRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "TestIamPermissions", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseDataFoundryServiceRestTransport._BaseCancelOperation, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.CancelOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + 
headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseCancelOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.CancelOperation", + 
extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseDataFoundryServiceRestTransport._BaseDeleteOperation, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.DeleteOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseDeleteOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._DeleteOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseDataFoundryServiceRestTransport._BaseGetOperation, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.GetOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGetOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseDataFoundryServiceRestTransport._BaseListOperations, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.ListOperations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> 
operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseListOperations._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = 
DataFoundryServiceRestTransport._ListOperations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation( + _BaseDataFoundryServiceRestTransport._BaseWaitOperation, + DataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("DataFoundryServiceRestTransport.WaitOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) 
+ return response + + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseWaitOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseWaitOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseWaitOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.WaitOperation", + extra={ + 
"serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "WaitOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = DataFoundryServiceRestTransport._WaitOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_wait_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "WaitOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("DataFoundryServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py new file mode 100644 index 0000000000..463a35b85b --- /dev/null +++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_asyncio.py @@ -0,0 +1,2187 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import google.auth + +try: + import aiohttp # type: ignore + from google.auth.aio.transport.sessions import AsyncAuthorizedSession # type: ignore + from google.api_core import rest_streaming_async # type: ignore + from google.api_core.operations_v1 import AsyncOperationsRestClient # type: ignore +except ImportError as e: # pragma: NO COVER + raise ImportError( + "`rest_asyncio` transport requires the library to be installed with the `async_rest` extra. Install the library with the `async_rest` extra using `pip install google-cloud-aiplatform[async_rest]`" + ) from e + +from google.auth.aio import credentials as ga_credentials_async # type: ignore + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.api_core import retry_async as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming_async # type: ignore +import google.protobuf + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore + +import json # type: ignore +import dataclasses +from typing import Any, Dict, List, Callable, Tuple, Optional, Sequence, Union + + +from 
google.cloud.aiplatform_v1.types import data_foundry_service +from google.longrunning import operations_pb2 # type: ignore + + +from .rest_base import _BaseDataFoundryServiceRestTransport + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +import logging + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"google-auth@{google.auth.__version__}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class AsyncDataFoundryServiceRestInterceptor: + """Asynchronous Interceptor for DataFoundryService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the AsyncDataFoundryServiceRestTransport. + + .. 
code-block:: python + class MyCustomDataFoundryServiceInterceptor(AsyncDataFoundryServiceRestInterceptor): + async def pre_generate_synthetic_data(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_generate_synthetic_data(self, response): + logging.log(f"Received response: {response}") + return response + + transport = AsyncDataFoundryServiceRestTransport(interceptor=MyCustomDataFoundryServiceInterceptor()) + client = DataFoundryServiceAsyncClient(transport=transport) + + + """ + + async def pre_generate_synthetic_data( + self, + request: data_foundry_service.GenerateSyntheticDataRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + data_foundry_service.GenerateSyntheticDataRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for generate_synthetic_data + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_generate_synthetic_data( + self, response: data_foundry_service.GenerateSyntheticDataResponse + ) -> data_foundry_service.GenerateSyntheticDataResponse: + """Post-rpc interceptor for generate_synthetic_data + + DEPRECATED. Please use the `post_generate_synthetic_data_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. This `post_generate_synthetic_data` interceptor runs + before the `post_generate_synthetic_data_with_metadata` interceptor. 
+ """ + return response + + async def post_generate_synthetic_data_with_metadata( + self, + response: data_foundry_service.GenerateSyntheticDataResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + data_foundry_service.GenerateSyntheticDataResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for generate_synthetic_data + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DataFoundryService server but before it is returned to user code. + + We recommend only using this `post_generate_synthetic_data_with_metadata` + interceptor in new development instead of the `post_generate_synthetic_data` interceptor. + When both interceptors are used, this `post_generate_synthetic_data_with_metadata` interceptor runs after the + `post_generate_synthetic_data` interceptor. The (possibly modified) response returned by + `post_generate_synthetic_data` will be passed to + `post_generate_synthetic_data_with_metadata`. + """ + return response, metadata + + async def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. 
+ """ + return response + + async def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_get_iam_policy( + self, response: policy_pb2.Policy + ) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. 
+ """ + return request, metadata + + async def post_set_iam_policy( + self, response: policy_pb2.Policy + ) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. 
+ """ + return response + + async def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. 
+ """ + return request, metadata + + async def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + async def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the DataFoundryService server. + """ + return request, metadata + + async def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the DataFoundryService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class AsyncDataFoundryServiceRestStub: + _session: AsyncAuthorizedSession + _host: str + _interceptor: AsyncDataFoundryServiceRestInterceptor + + +class AsyncDataFoundryServiceRestTransport(_BaseDataFoundryServiceRestTransport): + """Asynchronous REST backend transport for DataFoundryService. + + Service for generating and preparing datasets for Gen AI + evaluation. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials_async.Credentials] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + url_scheme: str = "https", + interceptor: Optional[AsyncDataFoundryServiceRestInterceptor] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.aio.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + url_scheme (str): the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=False, + url_scheme=url_scheme, + api_audience=None, + ) + self._session = AsyncAuthorizedSession(self._credentials) # type: ignore + self._interceptor = interceptor or AsyncDataFoundryServiceRestInterceptor() + self._wrap_with_kind = True + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.generate_synthetic_data: self._wrap_method( + self.generate_synthetic_data, + default_timeout=None, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: self._wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, 
func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + class _GenerateSyntheticData( + _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.GenerateSyntheticData") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: data_foundry_service.GenerateSyntheticDataRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> data_foundry_service.GenerateSyntheticDataResponse: + r"""Call the generate synthetic data method over HTTP. + + Args: + request (~.data_foundry_service.GenerateSyntheticDataRequest): + The request object. Request message for + DataFoundryService.GenerateSyntheticData. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + ~.data_foundry_service.GenerateSyntheticDataResponse: + The response containing the generated + data. + + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_http_options() + ) + + request, metadata = await self._interceptor.pre_generate_synthetic_data( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_transcoded_request( + http_options, request + ) + + body = _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GenerateSyntheticData", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GenerateSyntheticData", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncDataFoundryServiceRestTransport._GenerateSyntheticData._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = data_foundry_service.GenerateSyntheticDataResponse() + pb_resp = data_foundry_service.GenerateSyntheticDataResponse.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_generate_synthetic_data(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_generate_synthetic_data_with_metadata( + resp, response_metadata + ) + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + data_foundry_service.GenerateSyntheticDataResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.generate_synthetic_data", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GenerateSyntheticData", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + @property + def generate_synthetic_data( + self, + ) -> Callable[ + [data_foundry_service.GenerateSyntheticDataRequest], + data_foundry_service.GenerateSyntheticDataResponse, + ]: + return self._GenerateSyntheticData(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: 
ignore + + class _GetLocation( + _BaseDataFoundryServiceRestTransport._BaseGetLocation, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.GetLocation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGetLocation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_location( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncDataFoundryServiceRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseDataFoundryServiceRestTransport._BaseListLocations, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.ListLocations") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + 
return response + + async def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseListLocations._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_locations( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.DataFoundryServiceClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncDataFoundryServiceRestTransport._ListLocations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy( + _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy, + AsyncDataFoundryServiceRestStub, + ): + 
def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.GetIamPolicy") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. 
+ """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_iam_policy( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncDataFoundryServiceRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy( + _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.SetIamPolicy") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, 
+ ) + return response + + async def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_set_iam_policy( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) + + body = _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + 
_LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncDataFoundryServiceRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "SetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions( + 
_BaseDataFoundryServiceRestTransport._BaseTestIamPermissions, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.TestIamPermissions") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = await self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncDataFoundryServiceRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "TestIamPermissions", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseDataFoundryServiceRestTransport._BaseCancelOperation, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.CancelOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseCancelOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.CancelOperation", 
+ extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncDataFoundryServiceRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + return await self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseDataFoundryServiceRestTransport._BaseDeleteOperation, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.DeleteOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, 
bytes]]] = (), + ) -> None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseDeleteOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncDataFoundryServiceRestTransport._DeleteOperation._get_response( + 
self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + return await self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseDataFoundryServiceRestTransport._BaseGetOperation, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.GetOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseGetOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_operation( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncDataFoundryServiceRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseDataFoundryServiceRestTransport._BaseListOperations, + AsyncDataFoundryServiceRestStub, + ): + def __hash__(self): + return hash("AsyncDataFoundryServiceRestTransport.ListOperations") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, 
strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = ( + _BaseDataFoundryServiceRestTransport._BaseListOperations._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_operations( + request, metadata + ) + transcoded_request = _BaseDataFoundryServiceRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseDataFoundryServiceRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.DataFoundryServiceClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncDataFoundryServiceRestTransport._ListOperations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.DataFoundryService", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation( + _BaseDataFoundryServiceRestTransport._BaseWaitOperation, + AsyncDataFoundryServiceRestStub, + ): 
    # ---- AsyncDataFoundryServiceRestTransport._WaitOperation ----
    # Auto-generated GAPIC async-REST stub for the long-running-operations
    # WaitOperation RPC. Flow: interceptor.pre -> transcode -> HTTP send ->
    # error mapping -> proto parse -> interceptor.post.
    def __hash__(self):
        return hash("AsyncDataFoundryServiceRestTransport.WaitOperation")

    @staticmethod
    async def _get_response(
        host,
        metadata,
        query_params,
        session,
        timeout,
        transcoded_request,
        body=None,
    ):
        # Dispatch on the transcoded HTTP verb (e.g. session.get / session.post)
        # and send the request with the caller's metadata as headers.
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        headers = dict(metadata)
        # Payloads are always JSON-encoded protobufs for REST transports.
        headers["Content-Type"] = "application/json"
        response = await getattr(session, method)(
            "{host}{uri}".format(host=host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params, strict=True),
        )
        return response

    async def __call__(
        self,
        request: operations_pb2.WaitOperationRequest,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> operations_pb2.Operation:
        r"""Call the wait operation method over HTTP.

        Args:
            request (operations_pb2.WaitOperationRequest):
                The request object for WaitOperation method.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            operations_pb2.Operation: Response from WaitOperation method.
        """

        http_options = (
            _BaseDataFoundryServiceRestTransport._BaseWaitOperation._get_http_options()
        )

        # Give the user-supplied interceptor a chance to mutate request/metadata.
        request, metadata = await self._interceptor.pre_wait_operation(
            request, metadata
        )
        transcoded_request = _BaseDataFoundryServiceRestTransport._BaseWaitOperation._get_transcoded_request(
            http_options, request
        )

        # Jsonify the query params
        query_params = _BaseDataFoundryServiceRestTransport._BaseWaitOperation._get_query_params_json(
            transcoded_request
        )

        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            try:
                request_payload = json_format.MessageToJson(request)
            # NOTE(review): bare `except:` (catches BaseException) — generator-emitted;
            # only guards best-effort debug-log serialization, but `except Exception:`
            # would be safer. Flagged, not changed (generated code).
            except:
                request_payload = None
            http_request = {
                "payload": request_payload,
                "requestMethod": method,
                "requestUrl": request_url,
                "headers": dict(metadata),
            }
            _LOGGER.debug(
                # NOTE(review): f-prefix with no placeholders — harmless, generator artifact.
                f"Sending request for google.cloud.aiplatform_v1.DataFoundryServiceClient.WaitOperation",
                extra={
                    "serviceName": "google.cloud.aiplatform.v1.DataFoundryService",
                    "rpcName": "WaitOperation",
                    "httpRequest": http_request,
                    "metadata": http_request["headers"],
                },
            )

        # Send the request
        response = (
            await AsyncDataFoundryServiceRestTransport._WaitOperation._get_response(
                self._host,
                metadata,
                query_params,
                self._session,
                timeout,
                transcoded_request,
            )
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            content = await response.read()
            payload = json.loads(content.decode("utf-8"))
            request_url = "{host}{uri}".format(
                host=self._host, uri=transcoded_request["uri"]
            )
            method = transcoded_request["method"]
            raise core_exceptions.format_http_response_error(response, method, request_url, payload)  # type: ignore

        # Success path: decode the JSON body into an Operation proto, then let the
        # interceptor post-process it before returning.
        content = await response.read()
        resp = operations_pb2.Operation()
        resp = json_format.Parse(content, resp)
        resp = await self._interceptor.post_wait_operation(resp)
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            logging.DEBUG
        ):  # pragma: NO COVER
            try:
                response_payload = json_format.MessageToJson(resp)
            # NOTE(review): bare `except:` — same generator pattern as above.
            except:
                response_payload = None
            http_response = {
                "payload": response_payload,
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.cloud.aiplatform_v1.DataFoundryServiceAsyncClient.WaitOperation",
                extra={
                    "serviceName": "google.cloud.aiplatform.v1.DataFoundryService",
                    "rpcName": "WaitOperation",
                    "httpResponse": http_response,
                    "metadata": http_response["headers"],
                },
            )
        return resp

    # Identifies this transport flavor to the client factory machinery.
    @property
    def kind(self) -> str:
        return "rest_asyncio"

    async def close(self):
        # Release the underlying aiohttp-style session.
        await self._session.close()
diff --git a/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py
new file mode 100644
index 0000000000..639e07b64a
--- /dev/null
+++ b/google/cloud/aiplatform_v1/services/data_foundry_service/transports/rest_base.py
@@ -0,0 +1,2527 @@
# -*- coding: utf-8 -*-
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from .base import DataFoundryServiceTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.aiplatform_v1.types import data_foundry_service +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseDataFoundryServiceRestTransport(DataFoundryServiceTransport): + """Base REST backend transport for DataFoundryService. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). 
+ credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseGenerateSyntheticData: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{location=projects/*/locations/*}:generateSyntheticData", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = 
data_foundry_service.GenerateSyntheticDataRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseDataFoundryServiceRestTransport._BaseGenerateSyntheticData._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetLocation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListLocations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + return http_options + + @staticmethod + def 
_get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", 
+ }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + body = json.dumps(transcoded_request["body"]) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseTestIamPermissions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": 
"/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseCancelOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseDeleteOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, 
+ { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListOperations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + 
@staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseWaitOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ] + return http_options + + @staticmethod + def 
_get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + +__all__ = ("_BaseDataFoundryServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/__init__.py b/google/cloud/aiplatform_v1/services/dataset_service/__init__.py index feaf1648a0..42f97659ee 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py index 6de1c71606..46d879d521 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.types import annotation from google.cloud.aiplatform_v1.types import annotation_spec @@ -62,10 +61,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport from .client import DatasetServiceClient @@ -148,7 +149,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: DatasetServiceAsyncClient: The constructed client. 
""" - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + DatasetServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(DatasetServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -164,7 +168,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: DatasetServiceAsyncClient: The constructed client. """ - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + DatasetServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(DatasetServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -305,21 +312,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.DatasetServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.DatasetService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.DatasetService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.DatasetService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", 
lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.DatasetService", + "credentialsType": None, + } + ), ) async def create_dataset( @@ -407,7 +416,10 @@ async def sample_create_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) + flattened_params = [parent, dataset] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -501,7 +513,6 @@ async def sample_get_dataset(): request (Optional[Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]]): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 name (:class:`str`): Required. The name of the Dataset resource. @@ -526,7 +537,10 @@ async def sample_get_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -629,9 +643,9 @@ async def sample_update_dataset(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -653,7 +667,10 @@ async def sample_update_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) + flattened_params = [dataset, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -769,7 +786,10 @@ async def sample_list_datasets(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -903,7 +923,10 @@ async def sample_delete_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1039,7 +1062,10 @@ async def sample_import_data(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, import_configs]) + flattened_params = [name, import_configs] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1175,7 +1201,10 @@ async def sample_export_data(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) + flattened_params = [name, export_config] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1317,7 +1346,10 @@ async def sample_create_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset_version]) + flattened_params = [parent, dataset_version] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1430,7 +1462,7 @@ async def sample_update_dataset_version(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` + - ``display_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1450,7 +1482,10 @@ async def sample_update_dataset_version(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset_version, update_mask]) + flattened_params = [dataset_version, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1579,7 +1614,10 @@ async def sample_delete_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1671,7 +1709,6 @@ async def sample_get_dataset_version(): request (Optional[Union[google.cloud.aiplatform_v1.types.GetDatasetVersionRequest, dict]]): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 name (:class:`str`): Required. The resource name of the Dataset version to delete. Format: @@ -1695,7 +1732,10 @@ async def sample_get_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1810,7 +1850,10 @@ async def sample_list_dataset_versions(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1939,7 +1982,10 @@ async def sample_restore_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2060,7 +2106,10 @@ async def sample_list_data_items(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2286,7 +2335,10 @@ async def sample_list_saved_queries(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2420,7 +2472,10 @@ async def sample_delete_saved_query(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2537,7 +2592,10 @@ async def sample_get_annotation_spec(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2589,10 +2647,7 @@ async def list_annotations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAnnotationsAsyncPager: - r"""Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + r"""Lists Annotations belongs to a dataitem. .. code-block:: python @@ -2653,7 +2708,10 @@ async def sample_list_annotations(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3415,5 +3473,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("DatasetServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/client.py b/google/cloud/aiplatform_v1/services/dataset_service/client.py index 36099764f8..ec2ffcab17 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.dataset_service import pagers from google.cloud.aiplatform_v1.types import annotation from google.cloud.aiplatform_v1.types import annotation_spec @@ -76,10 +77,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DatasetServiceGrpcTransport from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport @@ -181,6 +184,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE 
= "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -490,12 +521,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = DatasetServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -503,7 +530,7 @@ def 
get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -535,20 +562,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = DatasetServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -641,6 +662,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -729,11 +777,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DatasetServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DatasetServiceClient._read_environment_variables() + ) self._client_cert_source = DatasetServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -845,21 +891,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.DatasetServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.DatasetService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.DatasetService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.DatasetService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": 
f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.DatasetService", + "credentialsType": None, + } + ), ) def create_dataset( @@ -947,7 +997,10 @@ def sample_create_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) + flattened_params = [parent, dataset] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1038,7 +1091,6 @@ def sample_get_dataset(): request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 name (str): Required. The name of the Dataset resource. @@ -1063,7 +1115,10 @@ def sample_get_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1163,9 +1218,9 @@ def sample_update_dataset(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
Updatable fields: - - ``display_name`` - - ``description`` - - ``labels`` + - ``display_name`` + - ``description`` + - ``labels`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1187,7 +1242,10 @@ def sample_update_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) + flattened_params = [dataset, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1300,7 +1358,10 @@ def sample_list_datasets(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1431,7 +1492,10 @@ def sample_delete_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1564,7 +1628,10 @@ def sample_import_data(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, import_configs]) + flattened_params = [name, import_configs] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1697,7 +1764,10 @@ def sample_export_data(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) + flattened_params = [name, export_config] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1836,7 +1906,10 @@ def sample_create_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset_version]) + flattened_params = [parent, dataset_version] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1946,7 +2019,7 @@ def sample_update_dataset_version(): [google.protobuf.FieldMask][google.protobuf.FieldMask]. Updatable fields: - - ``display_name`` + - ``display_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1966,7 +2039,10 @@ def sample_update_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([dataset_version, update_mask]) + flattened_params = [dataset_version, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2092,7 +2168,10 @@ def sample_delete_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2181,7 +2260,6 @@ def sample_get_dataset_version(): request (Union[google.cloud.aiplatform_v1.types.GetDatasetVersionRequest, dict]): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 name (str): Required. The resource name of the Dataset version to delete. Format: @@ -2205,7 +2283,10 @@ def sample_get_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2317,7 +2398,10 @@ def sample_list_dataset_versions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2443,7 +2527,10 @@ def sample_restore_dataset_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2561,7 +2648,10 @@ def sample_list_data_items(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2782,7 +2872,10 @@ def sample_list_saved_queries(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2913,7 +3006,10 @@ def sample_delete_saved_query(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3027,7 +3123,10 @@ def sample_get_annotation_spec(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3076,10 +3175,7 @@ def list_annotations( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAnnotationsPager: - r"""Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + r"""Lists Annotations belongs to a dataitem. .. code-block:: python @@ -3140,7 +3236,10 @@ def sample_list_annotations(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3248,16 +3347,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -3303,16 +3406,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -3475,16 +3582,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -3596,16 +3707,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -3718,16 +3833,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -3778,16 +3897,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -3833,16 +3956,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -3888,21 +4015,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("DatasetServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py index ea491f086a..e23cee90df 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py index 1ed7fdf423..508606defe 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py index 58b68f57fa..6d63373ac0 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import annotation_spec from google.cloud.aiplatform_v1.types import dataset @@ -42,6 +43,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class DatasetServiceTransport(abc.ABC): """Abstract transport class for DatasetService.""" @@ -73,9 +77,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
@@ -88,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -520,13 +528,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py index 2ad15b38e4..baa6b4d9ba 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -80,12 +80,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.DatasetService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -163,9 +162,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -299,9 +299,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -852,10 +853,7 @@ def list_annotations( ]: r"""Return a callable for the list annotations method over gRPC. - Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + Lists Annotations belongs to a dataitem. Returns: Callable[[~.ListAnnotationsRequest], diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py index c4632d9e27..4ecd09f9ff 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -159,8 +159,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -211,9 +212,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. 
This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -878,10 +880,7 @@ def list_annotations( ]: r"""Return a callable for the list annotations method over gRPC. - Lists Annotations belongs to a dataitem - This RPC is only available in InternalDatasetService. It - is only used for exporting conversation data to CCAI - Insights. + Lists Annotations belongs to a dataitem. Returns: Callable[[~.ListAnnotationsRequest], diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py index e4a8863afc..639effd652 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -68,6 +69,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class DatasetServiceRestInterceptor: """Interceptor for DatasetService. @@ -261,12 +265,35 @@ def post_create_dataset( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_create_dataset` interceptor runs + before the `post_create_dataset_with_metadata` interceptor. """ return response + def post_create_dataset_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_create_dataset_with_metadata` + interceptor in new development instead of the `post_create_dataset` interceptor. + When both interceptors are used, this `post_create_dataset_with_metadata` interceptor runs after the + `post_create_dataset` interceptor. The (possibly modified) response returned by + `post_create_dataset` will be passed to + `post_create_dataset_with_metadata`. 
+ """ + return response, metadata + def pre_create_dataset_version( self, request: dataset_service.CreateDatasetVersionRequest, @@ -287,12 +314,35 @@ def post_create_dataset_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_create_dataset_version` interceptor runs + before the `post_create_dataset_version_with_metadata` interceptor. """ return response + def post_create_dataset_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_create_dataset_version_with_metadata` + interceptor in new development instead of the `post_create_dataset_version` interceptor. + When both interceptors are used, this `post_create_dataset_version_with_metadata` interceptor runs after the + `post_create_dataset_version` interceptor. The (possibly modified) response returned by + `post_create_dataset_version` will be passed to + `post_create_dataset_version_with_metadata`. + """ + return response, metadata + def pre_delete_dataset( self, request: dataset_service.DeleteDatasetRequest, @@ -312,12 +362,35 @@ def post_delete_dataset( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_dataset - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_delete_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_delete_dataset` interceptor runs + before the `post_delete_dataset_with_metadata` interceptor. """ return response + def post_delete_dataset_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_delete_dataset_with_metadata` + interceptor in new development instead of the `post_delete_dataset` interceptor. + When both interceptors are used, this `post_delete_dataset_with_metadata` interceptor runs after the + `post_delete_dataset` interceptor. The (possibly modified) response returned by + `post_delete_dataset` will be passed to + `post_delete_dataset_with_metadata`. + """ + return response, metadata + def pre_delete_dataset_version( self, request: dataset_service.DeleteDatasetVersionRequest, @@ -338,12 +411,35 @@ def post_delete_dataset_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_delete_dataset_version` interceptor runs + before the `post_delete_dataset_version_with_metadata` interceptor. 
""" return response + def post_delete_dataset_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_delete_dataset_version_with_metadata` + interceptor in new development instead of the `post_delete_dataset_version` interceptor. + When both interceptors are used, this `post_delete_dataset_version_with_metadata` interceptor runs after the + `post_delete_dataset_version` interceptor. The (possibly modified) response returned by + `post_delete_dataset_version` will be passed to + `post_delete_dataset_version_with_metadata`. + """ + return response, metadata + def pre_delete_saved_query( self, request: dataset_service.DeleteSavedQueryRequest, @@ -363,12 +459,35 @@ def post_delete_saved_query( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_saved_query - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_saved_query_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_delete_saved_query` interceptor runs + before the `post_delete_saved_query_with_metadata` interceptor. 
""" return response + def post_delete_saved_query_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_saved_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_delete_saved_query_with_metadata` + interceptor in new development instead of the `post_delete_saved_query` interceptor. + When both interceptors are used, this `post_delete_saved_query_with_metadata` interceptor runs after the + `post_delete_saved_query` interceptor. The (possibly modified) response returned by + `post_delete_saved_query` will be passed to + `post_delete_saved_query_with_metadata`. + """ + return response, metadata + def pre_export_data( self, request: dataset_service.ExportDataRequest, @@ -388,12 +507,35 @@ def post_export_data( ) -> operations_pb2.Operation: """Post-rpc interceptor for export_data - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_export_data_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_export_data` interceptor runs + before the `post_export_data_with_metadata` interceptor. """ return response + def post_export_data_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for export_data + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. 
+ + We recommend only using this `post_export_data_with_metadata` + interceptor in new development instead of the `post_export_data` interceptor. + When both interceptors are used, this `post_export_data_with_metadata` interceptor runs after the + `post_export_data` interceptor. The (possibly modified) response returned by + `post_export_data` will be passed to + `post_export_data_with_metadata`. + """ + return response, metadata + def pre_get_annotation_spec( self, request: dataset_service.GetAnnotationSpecRequest, @@ -414,12 +556,35 @@ def post_get_annotation_spec( ) -> annotation_spec.AnnotationSpec: """Post-rpc interceptor for get_annotation_spec - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_annotation_spec_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_get_annotation_spec` interceptor runs + before the `post_get_annotation_spec_with_metadata` interceptor. """ return response + def post_get_annotation_spec_with_metadata( + self, + response: annotation_spec.AnnotationSpec, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[annotation_spec.AnnotationSpec, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_annotation_spec + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_get_annotation_spec_with_metadata` + interceptor in new development instead of the `post_get_annotation_spec` interceptor. + When both interceptors are used, this `post_get_annotation_spec_with_metadata` interceptor runs after the + `post_get_annotation_spec` interceptor. 
The (possibly modified) response returned by + `post_get_annotation_spec` will be passed to + `post_get_annotation_spec_with_metadata`. + """ + return response, metadata + def pre_get_dataset( self, request: dataset_service.GetDatasetRequest, @@ -437,12 +602,35 @@ def pre_get_dataset( def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: """Post-rpc interceptor for get_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_get_dataset` interceptor runs + before the `post_get_dataset_with_metadata` interceptor. """ return response + def post_get_dataset_with_metadata( + self, + response: dataset.Dataset, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[dataset.Dataset, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_get_dataset_with_metadata` + interceptor in new development instead of the `post_get_dataset` interceptor. + When both interceptors are used, this `post_get_dataset_with_metadata` interceptor runs after the + `post_get_dataset` interceptor. The (possibly modified) response returned by + `post_get_dataset` will be passed to + `post_get_dataset_with_metadata`. + """ + return response, metadata + def pre_get_dataset_version( self, request: dataset_service.GetDatasetVersionRequest, @@ -463,12 +651,35 @@ def post_get_dataset_version( ) -> dataset_version.DatasetVersion: """Post-rpc interceptor for get_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_get_dataset_version` interceptor runs + before the `post_get_dataset_version_with_metadata` interceptor. """ return response + def post_get_dataset_version_with_metadata( + self, + response: dataset_version.DatasetVersion, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[dataset_version.DatasetVersion, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_get_dataset_version_with_metadata` + interceptor in new development instead of the `post_get_dataset_version` interceptor. + When both interceptors are used, this `post_get_dataset_version_with_metadata` interceptor runs after the + `post_get_dataset_version` interceptor. The (possibly modified) response returned by + `post_get_dataset_version` will be passed to + `post_get_dataset_version_with_metadata`. + """ + return response, metadata + def pre_import_data( self, request: dataset_service.ImportDataRequest, @@ -488,12 +699,35 @@ def post_import_data( ) -> operations_pb2.Operation: """Post-rpc interceptor for import_data - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_import_data_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_import_data` interceptor runs + before the `post_import_data_with_metadata` interceptor. 
""" return response + def post_import_data_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for import_data + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_import_data_with_metadata` + interceptor in new development instead of the `post_import_data` interceptor. + When both interceptors are used, this `post_import_data_with_metadata` interceptor runs after the + `post_import_data` interceptor. The (possibly modified) response returned by + `post_import_data` will be passed to + `post_import_data_with_metadata`. + """ + return response, metadata + def pre_list_annotations( self, request: dataset_service.ListAnnotationsRequest, @@ -513,12 +747,37 @@ def post_list_annotations( ) -> dataset_service.ListAnnotationsResponse: """Post-rpc interceptor for list_annotations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_annotations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_annotations` interceptor runs + before the `post_list_annotations_with_metadata` interceptor. 
""" return response + def post_list_annotations_with_metadata( + self, + response: dataset_service.ListAnnotationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListAnnotationsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_annotations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_annotations_with_metadata` + interceptor in new development instead of the `post_list_annotations` interceptor. + When both interceptors are used, this `post_list_annotations_with_metadata` interceptor runs after the + `post_list_annotations` interceptor. The (possibly modified) response returned by + `post_list_annotations` will be passed to + `post_list_annotations_with_metadata`. + """ + return response, metadata + def pre_list_data_items( self, request: dataset_service.ListDataItemsRequest, @@ -538,12 +797,37 @@ def post_list_data_items( ) -> dataset_service.ListDataItemsResponse: """Post-rpc interceptor for list_data_items - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_data_items_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_data_items` interceptor runs + before the `post_list_data_items_with_metadata` interceptor. 
""" return response + def post_list_data_items_with_metadata( + self, + response: dataset_service.ListDataItemsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListDataItemsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_data_items + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_data_items_with_metadata` + interceptor in new development instead of the `post_list_data_items` interceptor. + When both interceptors are used, this `post_list_data_items_with_metadata` interceptor runs after the + `post_list_data_items` interceptor. The (possibly modified) response returned by + `post_list_data_items` will be passed to + `post_list_data_items_with_metadata`. + """ + return response, metadata + def pre_list_datasets( self, request: dataset_service.ListDatasetsRequest, @@ -563,12 +847,37 @@ def post_list_datasets( ) -> dataset_service.ListDatasetsResponse: """Post-rpc interceptor for list_datasets - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_datasets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_datasets` interceptor runs + before the `post_list_datasets_with_metadata` interceptor. 
""" return response + def post_list_datasets_with_metadata( + self, + response: dataset_service.ListDatasetsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListDatasetsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_datasets + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_datasets_with_metadata` + interceptor in new development instead of the `post_list_datasets` interceptor. + When both interceptors are used, this `post_list_datasets_with_metadata` interceptor runs after the + `post_list_datasets` interceptor. The (possibly modified) response returned by + `post_list_datasets` will be passed to + `post_list_datasets_with_metadata`. + """ + return response, metadata + def pre_list_dataset_versions( self, request: dataset_service.ListDatasetVersionsRequest, @@ -589,12 +898,38 @@ def post_list_dataset_versions( ) -> dataset_service.ListDatasetVersionsResponse: """Post-rpc interceptor for list_dataset_versions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_dataset_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_dataset_versions` interceptor runs + before the `post_list_dataset_versions_with_metadata` interceptor. 
""" return response + def post_list_dataset_versions_with_metadata( + self, + response: dataset_service.ListDatasetVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListDatasetVersionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_dataset_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_dataset_versions_with_metadata` + interceptor in new development instead of the `post_list_dataset_versions` interceptor. + When both interceptors are used, this `post_list_dataset_versions_with_metadata` interceptor runs after the + `post_list_dataset_versions` interceptor. The (possibly modified) response returned by + `post_list_dataset_versions` will be passed to + `post_list_dataset_versions_with_metadata`. + """ + return response, metadata + def pre_list_saved_queries( self, request: dataset_service.ListSavedQueriesRequest, @@ -614,12 +949,38 @@ def post_list_saved_queries( ) -> dataset_service.ListSavedQueriesResponse: """Post-rpc interceptor for list_saved_queries - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_saved_queries_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_saved_queries` interceptor runs + before the `post_list_saved_queries_with_metadata` interceptor. 
""" return response + def post_list_saved_queries_with_metadata( + self, + response: dataset_service.ListSavedQueriesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListSavedQueriesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_saved_queries + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_saved_queries_with_metadata` + interceptor in new development instead of the `post_list_saved_queries` interceptor. + When both interceptors are used, this `post_list_saved_queries_with_metadata` interceptor runs after the + `post_list_saved_queries` interceptor. The (possibly modified) response returned by + `post_list_saved_queries` will be passed to + `post_list_saved_queries_with_metadata`. + """ + return response, metadata + def pre_restore_dataset_version( self, request: dataset_service.RestoreDatasetVersionRequest, @@ -640,12 +1001,35 @@ def post_restore_dataset_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for restore_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_restore_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_restore_dataset_version` interceptor runs + before the `post_restore_dataset_version_with_metadata` interceptor. 
""" return response + def post_restore_dataset_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restore_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_restore_dataset_version_with_metadata` + interceptor in new development instead of the `post_restore_dataset_version` interceptor. + When both interceptors are used, this `post_restore_dataset_version_with_metadata` interceptor runs after the + `post_restore_dataset_version` interceptor. The (possibly modified) response returned by + `post_restore_dataset_version` will be passed to + `post_restore_dataset_version_with_metadata`. + """ + return response, metadata + def pre_search_data_items( self, request: dataset_service.SearchDataItemsRequest, @@ -665,12 +1049,37 @@ def post_search_data_items( ) -> dataset_service.SearchDataItemsResponse: """Post-rpc interceptor for search_data_items - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_data_items_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_search_data_items` interceptor runs + before the `post_search_data_items_with_metadata` interceptor. 
""" return response + def post_search_data_items_with_metadata( + self, + response: dataset_service.SearchDataItemsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.SearchDataItemsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for search_data_items + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_search_data_items_with_metadata` + interceptor in new development instead of the `post_search_data_items` interceptor. + When both interceptors are used, this `post_search_data_items_with_metadata` interceptor runs after the + `post_search_data_items` interceptor. The (possibly modified) response returned by + `post_search_data_items` will be passed to + `post_search_data_items_with_metadata`. + """ + return response, metadata + def pre_update_dataset( self, request: dataset_service.UpdateDatasetRequest, @@ -688,12 +1097,35 @@ def pre_update_dataset( def post_update_dataset(self, response: gca_dataset.Dataset) -> gca_dataset.Dataset: """Post-rpc interceptor for update_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_update_dataset` interceptor runs + before the `post_update_dataset_with_metadata` interceptor. 
""" return response + def post_update_dataset_with_metadata( + self, + response: gca_dataset.Dataset, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_dataset.Dataset, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_update_dataset_with_metadata` + interceptor in new development instead of the `post_update_dataset` interceptor. + When both interceptors are used, this `post_update_dataset_with_metadata` interceptor runs after the + `post_update_dataset` interceptor. The (possibly modified) response returned by + `post_update_dataset` will be passed to + `post_update_dataset_with_metadata`. + """ + return response, metadata + def pre_update_dataset_version( self, request: dataset_service.UpdateDatasetVersionRequest, @@ -714,12 +1146,37 @@ def post_update_dataset_version( ) -> gca_dataset_version.DatasetVersion: """Post-rpc interceptor for update_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_update_dataset_version` interceptor runs + before the `post_update_dataset_version_with_metadata` interceptor. 
""" return response + def post_update_dataset_version_with_metadata( + self, + response: gca_dataset_version.DatasetVersion, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_dataset_version.DatasetVersion, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_update_dataset_version_with_metadata` + interceptor in new development instead of the `post_update_dataset_version` interceptor. + When both interceptors are used, this `post_update_dataset_version_with_metadata` interceptor runs after the + `post_update_dataset_version` interceptor. The (possibly modified) response returned by + `post_update_dataset_version` will be passed to + `post_update_dataset_version_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1010,9 +1467,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1223,6 +1681,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1359,6 +1821,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1367,6 +1833,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1407,6 +1881,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1569,6 +2047,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1597,6 +2079,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1709,6 +2195,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1717,6 +2207,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1777,6 +2275,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1939,6 +2441,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1979,6 +2485,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2091,6 +2601,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2099,6 +2613,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2159,6 +2681,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2317,6 +2843,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2357,6 +2887,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2469,6 +3003,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2489,6 +3031,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2537,6 +3083,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2703,6 +3253,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2735,6 +3289,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2843,6 +3401,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2851,6 +3413,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2911,6 +3481,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3016,7 +3590,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3056,6 +3630,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3168,7 +3746,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3208,6 +3786,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3312,7 +3894,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3351,6 +3933,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3458,7 +4044,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3497,6 +4083,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3603,7 +4193,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3642,6 +4232,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_saved_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_saved_query_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3755,7 +4349,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3795,6 +4389,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_export_data(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_export_data_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG ): # pragma: NO COVER @@ -3941,6 +4539,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_annotation_spec(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_annotation_spec_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4007,7 +4609,6 @@ def __call__( request (~.dataset_service.GetDatasetRequest): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4088,6 +4689,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4154,7 +4759,6 @@ def __call__( request (~.dataset_service.GetDatasetVersionRequest): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -4233,6 +4837,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4346,7 +4954,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4386,6 +4994,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_import_data(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_import_data_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4532,6 +5144,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_annotations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_annotations_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4678,6 +5294,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_data_items(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_data_items_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4824,6 +5444,10 @@ def __call__( json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_list_datasets(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_datasets_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4973,6 +5597,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_dataset_versions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_dataset_versions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5121,6 +5749,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_saved_queries(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_saved_queries_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5230,7 +5862,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5269,6 +5901,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_restore_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_restore_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5415,6 +6051,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = 
self._interceptor.post_search_data_items(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_search_data_items_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5567,6 +6207,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5718,6 +6362,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5965,7 +6613,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -6107,7 +6754,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -6249,7 +6895,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -6392,7 +7037,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -6539,7 +7183,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -6683,7 +7326,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -6799,7 +7441,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -6915,7 +7556,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -7057,7 +7697,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -7199,7 +7838,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py index 2ac56fb4af..17fff63566 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -86,6 +86,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncDatasetServiceRestInterceptor: """Asynchronous Interceptor for DatasetService. @@ -279,12 +282,35 @@ async def post_create_dataset( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_create_dataset` interceptor runs + before the `post_create_dataset_with_metadata` interceptor. """ return response + async def post_create_dataset_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_create_dataset_with_metadata` + interceptor in new development instead of the `post_create_dataset` interceptor. + When both interceptors are used, this `post_create_dataset_with_metadata` interceptor runs after the + `post_create_dataset` interceptor. 
The (possibly modified) response returned by + `post_create_dataset` will be passed to + `post_create_dataset_with_metadata`. + """ + return response, metadata + async def pre_create_dataset_version( self, request: dataset_service.CreateDatasetVersionRequest, @@ -305,12 +331,35 @@ async def post_create_dataset_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_create_dataset_version` interceptor runs + before the `post_create_dataset_version_with_metadata` interceptor. """ return response + async def post_create_dataset_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_create_dataset_version_with_metadata` + interceptor in new development instead of the `post_create_dataset_version` interceptor. + When both interceptors are used, this `post_create_dataset_version_with_metadata` interceptor runs after the + `post_create_dataset_version` interceptor. The (possibly modified) response returned by + `post_create_dataset_version` will be passed to + `post_create_dataset_version_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_dataset( self, request: dataset_service.DeleteDatasetRequest, @@ -330,12 +379,35 @@ async def post_delete_dataset( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_delete_dataset` interceptor runs + before the `post_delete_dataset_with_metadata` interceptor. """ return response + async def post_delete_dataset_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_delete_dataset_with_metadata` + interceptor in new development instead of the `post_delete_dataset` interceptor. + When both interceptors are used, this `post_delete_dataset_with_metadata` interceptor runs after the + `post_delete_dataset` interceptor. The (possibly modified) response returned by + `post_delete_dataset` will be passed to + `post_delete_dataset_with_metadata`. + """ + return response, metadata + async def pre_delete_dataset_version( self, request: dataset_service.DeleteDatasetVersionRequest, @@ -356,12 +428,35 @@ async def post_delete_dataset_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_dataset_version_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_delete_dataset_version` interceptor runs + before the `post_delete_dataset_version_with_metadata` interceptor. """ return response + async def post_delete_dataset_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_delete_dataset_version_with_metadata` + interceptor in new development instead of the `post_delete_dataset_version` interceptor. + When both interceptors are used, this `post_delete_dataset_version_with_metadata` interceptor runs after the + `post_delete_dataset_version` interceptor. The (possibly modified) response returned by + `post_delete_dataset_version` will be passed to + `post_delete_dataset_version_with_metadata`. + """ + return response, metadata + async def pre_delete_saved_query( self, request: dataset_service.DeleteSavedQueryRequest, @@ -381,12 +476,35 @@ async def post_delete_saved_query( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_saved_query - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_saved_query_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_delete_saved_query` interceptor runs + before the `post_delete_saved_query_with_metadata` interceptor. 
""" return response + async def post_delete_saved_query_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_saved_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_delete_saved_query_with_metadata` + interceptor in new development instead of the `post_delete_saved_query` interceptor. + When both interceptors are used, this `post_delete_saved_query_with_metadata` interceptor runs after the + `post_delete_saved_query` interceptor. The (possibly modified) response returned by + `post_delete_saved_query` will be passed to + `post_delete_saved_query_with_metadata`. + """ + return response, metadata + async def pre_export_data( self, request: dataset_service.ExportDataRequest, @@ -406,12 +524,35 @@ async def post_export_data( ) -> operations_pb2.Operation: """Post-rpc interceptor for export_data - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_export_data_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_export_data` interceptor runs + before the `post_export_data_with_metadata` interceptor. 
""" return response + async def post_export_data_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for export_data + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_export_data_with_metadata` + interceptor in new development instead of the `post_export_data` interceptor. + When both interceptors are used, this `post_export_data_with_metadata` interceptor runs after the + `post_export_data` interceptor. The (possibly modified) response returned by + `post_export_data` will be passed to + `post_export_data_with_metadata`. + """ + return response, metadata + async def pre_get_annotation_spec( self, request: dataset_service.GetAnnotationSpecRequest, @@ -432,12 +573,35 @@ async def post_get_annotation_spec( ) -> annotation_spec.AnnotationSpec: """Post-rpc interceptor for get_annotation_spec - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_annotation_spec_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_get_annotation_spec` interceptor runs + before the `post_get_annotation_spec_with_metadata` interceptor. 
""" return response + async def post_get_annotation_spec_with_metadata( + self, + response: annotation_spec.AnnotationSpec, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[annotation_spec.AnnotationSpec, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_annotation_spec + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_get_annotation_spec_with_metadata` + interceptor in new development instead of the `post_get_annotation_spec` interceptor. + When both interceptors are used, this `post_get_annotation_spec_with_metadata` interceptor runs after the + `post_get_annotation_spec` interceptor. The (possibly modified) response returned by + `post_get_annotation_spec` will be passed to + `post_get_annotation_spec_with_metadata`. + """ + return response, metadata + async def pre_get_dataset( self, request: dataset_service.GetDatasetRequest, @@ -455,12 +619,35 @@ async def pre_get_dataset( async def post_get_dataset(self, response: dataset.Dataset) -> dataset.Dataset: """Post-rpc interceptor for get_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_get_dataset` interceptor runs + before the `post_get_dataset_with_metadata` interceptor. 
""" return response + async def post_get_dataset_with_metadata( + self, + response: dataset.Dataset, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[dataset.Dataset, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_get_dataset_with_metadata` + interceptor in new development instead of the `post_get_dataset` interceptor. + When both interceptors are used, this `post_get_dataset_with_metadata` interceptor runs after the + `post_get_dataset` interceptor. The (possibly modified) response returned by + `post_get_dataset` will be passed to + `post_get_dataset_with_metadata`. + """ + return response, metadata + async def pre_get_dataset_version( self, request: dataset_service.GetDatasetVersionRequest, @@ -481,12 +668,35 @@ async def post_get_dataset_version( ) -> dataset_version.DatasetVersion: """Post-rpc interceptor for get_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_get_dataset_version` interceptor runs + before the `post_get_dataset_version_with_metadata` interceptor. 
""" return response + async def post_get_dataset_version_with_metadata( + self, + response: dataset_version.DatasetVersion, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[dataset_version.DatasetVersion, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_get_dataset_version_with_metadata` + interceptor in new development instead of the `post_get_dataset_version` interceptor. + When both interceptors are used, this `post_get_dataset_version_with_metadata` interceptor runs after the + `post_get_dataset_version` interceptor. The (possibly modified) response returned by + `post_get_dataset_version` will be passed to + `post_get_dataset_version_with_metadata`. + """ + return response, metadata + async def pre_import_data( self, request: dataset_service.ImportDataRequest, @@ -506,12 +716,35 @@ async def post_import_data( ) -> operations_pb2.Operation: """Post-rpc interceptor for import_data - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_import_data_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_import_data` interceptor runs + before the `post_import_data_with_metadata` interceptor. 
""" return response + async def post_import_data_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for import_data + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_import_data_with_metadata` + interceptor in new development instead of the `post_import_data` interceptor. + When both interceptors are used, this `post_import_data_with_metadata` interceptor runs after the + `post_import_data` interceptor. The (possibly modified) response returned by + `post_import_data` will be passed to + `post_import_data_with_metadata`. + """ + return response, metadata + async def pre_list_annotations( self, request: dataset_service.ListAnnotationsRequest, @@ -531,12 +764,37 @@ async def post_list_annotations( ) -> dataset_service.ListAnnotationsResponse: """Post-rpc interceptor for list_annotations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_annotations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_annotations` interceptor runs + before the `post_list_annotations_with_metadata` interceptor. 
""" return response + async def post_list_annotations_with_metadata( + self, + response: dataset_service.ListAnnotationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListAnnotationsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_annotations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_annotations_with_metadata` + interceptor in new development instead of the `post_list_annotations` interceptor. + When both interceptors are used, this `post_list_annotations_with_metadata` interceptor runs after the + `post_list_annotations` interceptor. The (possibly modified) response returned by + `post_list_annotations` will be passed to + `post_list_annotations_with_metadata`. + """ + return response, metadata + async def pre_list_data_items( self, request: dataset_service.ListDataItemsRequest, @@ -556,12 +814,37 @@ async def post_list_data_items( ) -> dataset_service.ListDataItemsResponse: """Post-rpc interceptor for list_data_items - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_data_items_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_data_items` interceptor runs + before the `post_list_data_items_with_metadata` interceptor. 
""" return response + async def post_list_data_items_with_metadata( + self, + response: dataset_service.ListDataItemsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListDataItemsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_data_items + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_data_items_with_metadata` + interceptor in new development instead of the `post_list_data_items` interceptor. + When both interceptors are used, this `post_list_data_items_with_metadata` interceptor runs after the + `post_list_data_items` interceptor. The (possibly modified) response returned by + `post_list_data_items` will be passed to + `post_list_data_items_with_metadata`. + """ + return response, metadata + async def pre_list_datasets( self, request: dataset_service.ListDatasetsRequest, @@ -581,12 +864,37 @@ async def post_list_datasets( ) -> dataset_service.ListDatasetsResponse: """Post-rpc interceptor for list_datasets - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_datasets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_datasets` interceptor runs + before the `post_list_datasets_with_metadata` interceptor. 
""" return response + async def post_list_datasets_with_metadata( + self, + response: dataset_service.ListDatasetsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListDatasetsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_datasets + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_datasets_with_metadata` + interceptor in new development instead of the `post_list_datasets` interceptor. + When both interceptors are used, this `post_list_datasets_with_metadata` interceptor runs after the + `post_list_datasets` interceptor. The (possibly modified) response returned by + `post_list_datasets` will be passed to + `post_list_datasets_with_metadata`. + """ + return response, metadata + async def pre_list_dataset_versions( self, request: dataset_service.ListDatasetVersionsRequest, @@ -607,12 +915,38 @@ async def post_list_dataset_versions( ) -> dataset_service.ListDatasetVersionsResponse: """Post-rpc interceptor for list_dataset_versions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_dataset_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_dataset_versions` interceptor runs + before the `post_list_dataset_versions_with_metadata` interceptor. 
""" return response + async def post_list_dataset_versions_with_metadata( + self, + response: dataset_service.ListDatasetVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListDatasetVersionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_dataset_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_dataset_versions_with_metadata` + interceptor in new development instead of the `post_list_dataset_versions` interceptor. + When both interceptors are used, this `post_list_dataset_versions_with_metadata` interceptor runs after the + `post_list_dataset_versions` interceptor. The (possibly modified) response returned by + `post_list_dataset_versions` will be passed to + `post_list_dataset_versions_with_metadata`. + """ + return response, metadata + async def pre_list_saved_queries( self, request: dataset_service.ListSavedQueriesRequest, @@ -632,12 +966,38 @@ async def post_list_saved_queries( ) -> dataset_service.ListSavedQueriesResponse: """Post-rpc interceptor for list_saved_queries - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_saved_queries_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_list_saved_queries` interceptor runs + before the `post_list_saved_queries_with_metadata` interceptor. 
""" return response + async def post_list_saved_queries_with_metadata( + self, + response: dataset_service.ListSavedQueriesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.ListSavedQueriesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_saved_queries + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_list_saved_queries_with_metadata` + interceptor in new development instead of the `post_list_saved_queries` interceptor. + When both interceptors are used, this `post_list_saved_queries_with_metadata` interceptor runs after the + `post_list_saved_queries` interceptor. The (possibly modified) response returned by + `post_list_saved_queries` will be passed to + `post_list_saved_queries_with_metadata`. + """ + return response, metadata + async def pre_restore_dataset_version( self, request: dataset_service.RestoreDatasetVersionRequest, @@ -658,12 +1018,35 @@ async def post_restore_dataset_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for restore_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_restore_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_restore_dataset_version` interceptor runs + before the `post_restore_dataset_version_with_metadata` interceptor. 
""" return response + async def post_restore_dataset_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restore_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_restore_dataset_version_with_metadata` + interceptor in new development instead of the `post_restore_dataset_version` interceptor. + When both interceptors are used, this `post_restore_dataset_version_with_metadata` interceptor runs after the + `post_restore_dataset_version` interceptor. The (possibly modified) response returned by + `post_restore_dataset_version` will be passed to + `post_restore_dataset_version_with_metadata`. + """ + return response, metadata + async def pre_search_data_items( self, request: dataset_service.SearchDataItemsRequest, @@ -683,12 +1066,37 @@ async def post_search_data_items( ) -> dataset_service.SearchDataItemsResponse: """Post-rpc interceptor for search_data_items - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_data_items_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_search_data_items` interceptor runs + before the `post_search_data_items_with_metadata` interceptor. 
""" return response + async def post_search_data_items_with_metadata( + self, + response: dataset_service.SearchDataItemsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + dataset_service.SearchDataItemsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for search_data_items + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_search_data_items_with_metadata` + interceptor in new development instead of the `post_search_data_items` interceptor. + When both interceptors are used, this `post_search_data_items_with_metadata` interceptor runs after the + `post_search_data_items` interceptor. The (possibly modified) response returned by + `post_search_data_items` will be passed to + `post_search_data_items_with_metadata`. + """ + return response, metadata + async def pre_update_dataset( self, request: dataset_service.UpdateDatasetRequest, @@ -708,12 +1116,35 @@ async def post_update_dataset( ) -> gca_dataset.Dataset: """Post-rpc interceptor for update_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_update_dataset` interceptor runs + before the `post_update_dataset_with_metadata` interceptor. 
""" return response + async def post_update_dataset_with_metadata( + self, + response: gca_dataset.Dataset, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_dataset.Dataset, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_update_dataset_with_metadata` + interceptor in new development instead of the `post_update_dataset` interceptor. + When both interceptors are used, this `post_update_dataset_with_metadata` interceptor runs after the + `post_update_dataset` interceptor. The (possibly modified) response returned by + `post_update_dataset` will be passed to + `post_update_dataset_with_metadata`. + """ + return response, metadata + async def pre_update_dataset_version( self, request: dataset_service.UpdateDatasetVersionRequest, @@ -734,12 +1165,37 @@ async def post_update_dataset_version( ) -> gca_dataset_version.DatasetVersion: """Post-rpc interceptor for update_dataset_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_dataset_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatasetService server but before - it is returned to user code. + it is returned to user code. This `post_update_dataset_version` interceptor runs + before the `post_update_dataset_version_with_metadata` interceptor. 
""" return response + async def post_update_dataset_version_with_metadata( + self, + response: gca_dataset_version.DatasetVersion, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_dataset_version.DatasetVersion, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_dataset_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatasetService server but before it is returned to user code. + + We recommend only using this `post_update_dataset_version_with_metadata` + interceptor in new development instead of the `post_update_dataset_version` interceptor. + When both interceptors are used, this `post_update_dataset_version_with_metadata` interceptor runs after the + `post_update_dataset_version` interceptor. The (possibly modified) response returned by + `post_update_dataset_version` will be passed to + `post_update_dataset_version_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1049,9 +1505,9 @@ def __init__( self._interceptor = interceptor or AsyncDatasetServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1296,7 +1752,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1345,6 +1801,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_create_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1458,7 +1918,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1505,6 +1965,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1612,7 +2076,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1660,6 +2124,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1768,7 +2236,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1814,6 +2282,10 @@ async def 
__call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1922,7 +2394,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1970,6 +2442,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_saved_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_saved_query_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2086,7 +2562,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2133,6 +2609,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_export_data(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_export_data_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2288,6 +2768,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_get_annotation_spec(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_annotation_spec_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2355,7 +2839,6 @@ async def __call__( request (~.dataset_service.GetDatasetRequest): The request object. Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - Next ID: 4 retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2443,6 +2926,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2511,7 +2998,6 @@ async def __call__( request (~.dataset_service.GetDatasetVersionRequest): The request object. Request message for [DatasetService.GetDatasetVersion][google.cloud.aiplatform.v1.DatasetService.GetDatasetVersion]. - Next ID: 4 retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -2597,6 +3083,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2713,7 +3203,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2760,6 +3250,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_import_data(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_import_data_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2915,6 +3409,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_annotations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_annotations_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3071,6 +3569,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_data_items(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_data_items_with_metadata( + resp, response_metadata + 
) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3227,6 +3729,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_datasets(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_datasets_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3382,6 +3888,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_dataset_versions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_dataset_versions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3539,6 +4049,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_saved_queries(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_saved_queries_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3649,7 +4163,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3695,6 +4209,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_restore_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] + resp, _ = ( + await self._interceptor.post_restore_dataset_version_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3850,6 +4370,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_search_data_items(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_search_data_items_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4012,6 +4536,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4169,6 +4697,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_dataset_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_dataset_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4366,6 +4898,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -4502,6 +5038,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -4510,6 +5050,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4550,6 +5098,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -4712,6 +5264,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -4740,6 +5296,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -4852,6 +5412,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -4860,6 +5424,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4920,6 +5492,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -5082,6 +5658,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -5122,6 +5702,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -5234,6 +5818,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -5242,6 +5830,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5302,6 +5898,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -5460,6 +6060,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -5500,6 +6104,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -5612,6 +6220,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -5632,6 +6248,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -5680,6 +6300,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -5846,6 +6470,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -5878,6 +6506,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -5986,6 +6618,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -5994,6 +6630,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6054,6 +6698,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -6256,7 +6904,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -6408,7 +7055,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. 
Args: @@ -6560,7 +7206,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -6713,7 +7358,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -6871,7 +7515,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -7022,7 +7665,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -7147,7 +7789,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -7271,7 +7912,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -7424,7 +8064,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -7576,7 +8215,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py index 484c11a412..599ead0a73 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1224,6 +1224,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -1310,6 +1314,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1388,6 +1397,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1569,6 +1582,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1705,6 +1722,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1713,6 +1734,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1753,6 +1782,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1934,6 +1967,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1962,6 +1999,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2074,6 +2115,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2082,6 +2127,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2142,6 +2195,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2323,6 +2380,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2363,6 +2424,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2475,6 +2540,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2483,6 +2552,14 @@ def 
_get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2543,6 +2620,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2720,6 +2801,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2760,6 +2845,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2872,6 +2961,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2892,6 +2989,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + 
"uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2940,6 +3041,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3125,6 +3230,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3157,6 +3266,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3265,6 +3378,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3273,6 +3390,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", 
@@ -3333,6 +3458,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py index 2c094b9fd0..1a6e5f48e7 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py index 92c31a5ef4..26640fe511 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import ( @@ -60,9 +59,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport from .client import DeploymentResourcePoolServiceClient @@ -155,7 +156,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: DeploymentResourcePoolServiceAsyncClient: The constructed client. 
""" - return DeploymentResourcePoolServiceClient.from_service_account_info.__func__(DeploymentResourcePoolServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + DeploymentResourcePoolServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + DeploymentResourcePoolServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -171,7 +177,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: DeploymentResourcePoolServiceAsyncClient: The constructed client. """ - return DeploymentResourcePoolServiceClient.from_service_account_file.__func__(DeploymentResourcePoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + DeploymentResourcePoolServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + DeploymentResourcePoolServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file @@ -316,21 +327,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.DeploymentResourcePoolServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + 
"credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "credentialsType": None, + } + ), ) async def create_deployment_resource_pool( @@ -438,8 +451,13 @@ async def sample_create_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, deployment_resource_pool, deployment_resource_pool_id] + flattened_params = [ + parent, + deployment_resource_pool, + deployment_resource_pool_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -574,7 +592,10 @@ async def sample_get_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -696,7 +717,10 @@ async def sample_list_deployment_resource_pools(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -849,7 +873,10 @@ async def sample_update_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([deployment_resource_pool, update_mask]) + flattened_params = [deployment_resource_pool, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1001,7 +1028,10 @@ async def sample_delete_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1132,7 +1162,10 @@ async def sample_query_deployed_models(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([deployment_resource_pool]) + flattened_params = [deployment_resource_pool] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1900,5 +1933,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("DeploymentResourcePoolServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py index 13313ad94e..9c59404b0b 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.deployment_resource_pool_service import pagers from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import ( @@ -74,9 +75,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import DeploymentResourcePoolServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import DeploymentResourcePoolServiceGrpcTransport from .transports.grpc_asyncio import DeploymentResourcePoolServiceGrpcAsyncIOTransport @@ -103,14 +106,14 @@ class DeploymentResourcePoolServiceClientMeta(type): OrderedDict() ) # type: Dict[str, Type[DeploymentResourcePoolServiceTransport]] _transport_registry["grpc"] = 
DeploymentResourcePoolServiceGrpcTransport - _transport_registry[ - "grpc_asyncio" - ] = DeploymentResourcePoolServiceGrpcAsyncIOTransport + _transport_registry["grpc_asyncio"] = ( + DeploymentResourcePoolServiceGrpcAsyncIOTransport + ) _transport_registry["rest"] = DeploymentResourcePoolServiceRestTransport if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncDeploymentResourcePoolServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncDeploymentResourcePoolServiceRestTransport + ) def get_transport_class( cls, @@ -182,6 +185,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -435,12 +466,10 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = ( + DeploymentResourcePoolServiceClient._use_client_cert_effective() + ) use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -448,7 +477,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -480,20 +509,16 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = ( + DeploymentResourcePoolServiceClient._use_client_cert_effective() + ) use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -588,6 +613,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -680,11 +732,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = DeploymentResourcePoolServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + DeploymentResourcePoolServiceClient._read_environment_variables() + ) self._client_cert_source = ( DeploymentResourcePoolServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert @@ -805,21 +855,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.DeploymentResourcePoolServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", + "credentialsType": None, + } + ), ) def create_deployment_resource_pool( @@ -927,8 +981,13 @@ def sample_create_deployment_resource_pool(): # Create or coerce a 
protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, deployment_resource_pool, deployment_resource_pool_id] + flattened_params = [ + parent, + deployment_resource_pool, + deployment_resource_pool_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1062,7 +1121,10 @@ def sample_get_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1183,7 +1245,10 @@ def sample_list_deployment_resource_pools(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1335,7 +1400,10 @@ def sample_update_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([deployment_resource_pool, update_mask]) + flattened_params = [deployment_resource_pool, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1486,7 +1554,10 @@ def sample_delete_deployment_resource_pool(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1616,7 +1687,10 @@ def sample_query_deployed_models(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([deployment_resource_pool]) + flattened_params = [deployment_resource_pool] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1730,16 +1804,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1785,16 +1863,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1957,16 +2039,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -2078,16 +2164,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2200,16 +2290,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2260,16 +2354,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2315,16 +2413,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2370,21 +2472,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("DeploymentResourcePoolServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py index 9a9ac24f75..44366d74de 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/__init__.py index 4bb70cbfe9..dad07c12b9 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -45,9 +45,9 @@ _transport_registry["grpc_asyncio"] = DeploymentResourcePoolServiceGrpcAsyncIOTransport _transport_registry["rest"] = DeploymentResourcePoolServiceRestTransport if HAS_REST_ASYNC: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncDeploymentResourcePoolServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncDeploymentResourcePoolServiceRestTransport + ) __all__ = ( "DeploymentResourcePoolServiceTransport", diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py index 5cfd33956e..678831a458 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import deployment_resource_pool from google.cloud.aiplatform_v1.types import deployment_resource_pool_service @@ -38,6 +39,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class DeploymentResourcePoolServiceTransport(abc.ABC): """Abstract transport class for DeploymentResourcePoolService.""" @@ -69,9 +73,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -84,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -100,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): @@ -320,13 +328,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py index 695e775b19..afb0a660c6 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -76,12 +76,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.DeploymentResourcePoolService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -160,9 +159,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. 
A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -296,9 +296,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -369,12 +370,12 @@ def create_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_deployment_resource_pool" not in self._stubs: - self._stubs[ - "create_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/CreateDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/CreateDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_deployment_resource_pool"] @@ -400,12 +401,12 @@ def get_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_deployment_resource_pool" not in self._stubs: - self._stubs[ - "get_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/GetDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.GetDeploymentResourcePoolRequest.serialize, - response_deserializer=deployment_resource_pool.DeploymentResourcePool.deserialize, + self._stubs["get_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/GetDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.GetDeploymentResourcePoolRequest.serialize, + response_deserializer=deployment_resource_pool.DeploymentResourcePool.deserialize, + ) ) return self._stubs["get_deployment_resource_pool"] @@ -431,12 +432,12 @@ def list_deployment_resource_pools( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_deployment_resource_pools" not in self._stubs: - self._stubs[ - "list_deployment_resource_pools" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/ListDeploymentResourcePools", - request_serializer=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest.serialize, - response_deserializer=deployment_resource_pool_service.ListDeploymentResourcePoolsResponse.deserialize, + self._stubs["list_deployment_resource_pools"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/ListDeploymentResourcePools", + request_serializer=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest.serialize, + response_deserializer=deployment_resource_pool_service.ListDeploymentResourcePoolsResponse.deserialize, + ) ) return self._stubs["list_deployment_resource_pools"] @@ -463,12 +464,12 @@ def update_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_deployment_resource_pool" not in self._stubs: - self._stubs[ - "update_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/UpdateDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.UpdateDeploymentResourcePoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/UpdateDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.UpdateDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_deployment_resource_pool"] @@ -495,12 +496,12 @@ def delete_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_deployment_resource_pool" not in self._stubs: - self._stubs[ - "delete_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/DeleteDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/DeleteDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_deployment_resource_pool"] diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py index 
1ea68ac5dd..d18db93642 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -156,8 +156,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -208,9 +209,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. 
@@ -376,12 +378,12 @@ def create_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_deployment_resource_pool" not in self._stubs: - self._stubs[ - "create_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/CreateDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/CreateDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.CreateDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_deployment_resource_pool"] @@ -407,12 +409,12 @@ def get_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_deployment_resource_pool" not in self._stubs: - self._stubs[ - "get_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/GetDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.GetDeploymentResourcePoolRequest.serialize, - response_deserializer=deployment_resource_pool.DeploymentResourcePool.deserialize, + self._stubs["get_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/GetDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.GetDeploymentResourcePoolRequest.serialize, + response_deserializer=deployment_resource_pool.DeploymentResourcePool.deserialize, + ) ) return self._stubs["get_deployment_resource_pool"] @@ -438,12 +440,12 @@ def list_deployment_resource_pools( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_deployment_resource_pools" not in self._stubs: - self._stubs[ - "list_deployment_resource_pools" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/ListDeploymentResourcePools", - request_serializer=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest.serialize, - response_deserializer=deployment_resource_pool_service.ListDeploymentResourcePoolsResponse.deserialize, + self._stubs["list_deployment_resource_pools"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/ListDeploymentResourcePools", + request_serializer=deployment_resource_pool_service.ListDeploymentResourcePoolsRequest.serialize, + response_deserializer=deployment_resource_pool_service.ListDeploymentResourcePoolsResponse.deserialize, + ) ) return self._stubs["list_deployment_resource_pools"] @@ -470,12 +472,12 @@ def update_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just 
need # to pass in the functions for each. if "update_deployment_resource_pool" not in self._stubs: - self._stubs[ - "update_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/UpdateDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.UpdateDeploymentResourcePoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/UpdateDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.UpdateDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_deployment_resource_pool"] @@ -502,12 +504,12 @@ def delete_deployment_resource_pool( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_deployment_resource_pool" not in self._stubs: - self._stubs[ - "delete_deployment_resource_pool" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/DeleteDeploymentResourcePool", - request_serializer=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_deployment_resource_pool"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.DeploymentResourcePoolService/DeleteDeploymentResourcePool", + request_serializer=deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_deployment_resource_pool"] diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py index 350ff26032..3e1c4c6385 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -64,6 +65,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class DeploymentResourcePoolServiceRestInterceptor: """Interceptor for DeploymentResourcePoolService. @@ -154,12 +158,35 @@ def post_create_deployment_resource_pool( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_create_deployment_resource_pool` interceptor runs + before the `post_create_deployment_resource_pool_with_metadata` interceptor. """ return response + def post_create_deployment_resource_pool_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_create_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_create_deployment_resource_pool` interceptor. 
+ When both interceptors are used, this `post_create_deployment_resource_pool_with_metadata` interceptor runs after the + `post_create_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_create_deployment_resource_pool` will be passed to + `post_create_deployment_resource_pool_with_metadata`. + """ + return response, metadata + def pre_delete_deployment_resource_pool( self, request: deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, @@ -180,12 +207,35 @@ def post_delete_deployment_resource_pool( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_delete_deployment_resource_pool` interceptor runs + before the `post_delete_deployment_resource_pool_with_metadata` interceptor. """ return response + def post_delete_deployment_resource_pool_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_delete_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_delete_deployment_resource_pool` interceptor. 
+ When both interceptors are used, this `post_delete_deployment_resource_pool_with_metadata` interceptor runs after the + `post_delete_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_delete_deployment_resource_pool` will be passed to + `post_delete_deployment_resource_pool_with_metadata`. + """ + return response, metadata + def pre_get_deployment_resource_pool( self, request: deployment_resource_pool_service.GetDeploymentResourcePoolRequest, @@ -206,12 +256,38 @@ def post_get_deployment_resource_pool( ) -> deployment_resource_pool.DeploymentResourcePool: """Post-rpc interceptor for get_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_get_deployment_resource_pool` interceptor runs + before the `post_get_deployment_resource_pool_with_metadata` interceptor. """ return response + def post_get_deployment_resource_pool_with_metadata( + self, + response: deployment_resource_pool.DeploymentResourcePool, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + deployment_resource_pool.DeploymentResourcePool, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_get_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_get_deployment_resource_pool` interceptor. 
+ When both interceptors are used, this `post_get_deployment_resource_pool_with_metadata` interceptor runs after the + `post_get_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_get_deployment_resource_pool` will be passed to + `post_get_deployment_resource_pool_with_metadata`. + """ + return response, metadata + def pre_list_deployment_resource_pools( self, request: deployment_resource_pool_service.ListDeploymentResourcePoolsRequest, @@ -233,12 +309,38 @@ def post_list_deployment_resource_pools( ) -> deployment_resource_pool_service.ListDeploymentResourcePoolsResponse: """Post-rpc interceptor for list_deployment_resource_pools - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_deployment_resource_pools_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_list_deployment_resource_pools` interceptor runs + before the `post_list_deployment_resource_pools_with_metadata` interceptor. """ return response + def post_list_deployment_resource_pools_with_metadata( + self, + response: deployment_resource_pool_service.ListDeploymentResourcePoolsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_deployment_resource_pools + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_list_deployment_resource_pools_with_metadata` + interceptor in new development instead of the `post_list_deployment_resource_pools` interceptor. 
+ When both interceptors are used, this `post_list_deployment_resource_pools_with_metadata` interceptor runs after the + `post_list_deployment_resource_pools` interceptor. The (possibly modified) response returned by + `post_list_deployment_resource_pools` will be passed to + `post_list_deployment_resource_pools_with_metadata`. + """ + return response, metadata + def pre_query_deployed_models( self, request: deployment_resource_pool_service.QueryDeployedModelsRequest, @@ -259,12 +361,38 @@ def post_query_deployed_models( ) -> deployment_resource_pool_service.QueryDeployedModelsResponse: """Post-rpc interceptor for query_deployed_models - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_deployed_models_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_query_deployed_models` interceptor runs + before the `post_query_deployed_models_with_metadata` interceptor. """ return response + def post_query_deployed_models_with_metadata( + self, + response: deployment_resource_pool_service.QueryDeployedModelsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + deployment_resource_pool_service.QueryDeployedModelsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for query_deployed_models + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_query_deployed_models_with_metadata` + interceptor in new development instead of the `post_query_deployed_models` interceptor. + When both interceptors are used, this `post_query_deployed_models_with_metadata` interceptor runs after the + `post_query_deployed_models` interceptor. 
The (possibly modified) response returned by + `post_query_deployed_models` will be passed to + `post_query_deployed_models_with_metadata`. + """ + return response, metadata + def pre_update_deployment_resource_pool( self, request: deployment_resource_pool_service.UpdateDeploymentResourcePoolRequest, @@ -285,12 +413,35 @@ def post_update_deployment_resource_pool( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_update_deployment_resource_pool` interceptor runs + before the `post_update_deployment_resource_pool_with_metadata` interceptor. """ return response + def post_update_deployment_resource_pool_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_update_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_update_deployment_resource_pool` interceptor. + When both interceptors are used, this `post_update_deployment_resource_pool_with_metadata` interceptor runs after the + `post_update_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_update_deployment_resource_pool` will be passed to + `post_update_deployment_resource_pool_with_metadata`. 
+ """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -582,9 +733,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -797,6 +949,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -933,6 +1089,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -941,6 +1101,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -981,6 +1149,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1143,6 +1315,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1171,6 +1347,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1283,6 +1463,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1291,6 +1475,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", 
}, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1351,6 +1543,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1513,6 +1709,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1553,6 +1753,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1665,6 +1869,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1673,6 +1881,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1733,6 +1949,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -1891,6 +2111,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1931,6 +2155,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2043,6 +2271,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2063,6 +2299,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2111,6 +2351,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2277,6 +2521,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2309,6 +2557,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2417,6 +2669,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2425,6 +2681,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2485,6 +2749,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2596,7 +2864,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2636,6 +2904,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_create_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2746,7 +3020,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2785,6 +3059,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = 
self._interceptor.post_delete_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_delete_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2937,6 +3217,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_deployment_resource_pool_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3097,6 +3381,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_deployment_resource_pools(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_list_deployment_resource_pools_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3250,6 +3540,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_query_deployed_models(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_query_deployed_models_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3367,7 +3661,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3407,6 +3701,12 @@ def __call__( json_format.Parse(response.content, resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_update_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_update_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3538,7 +3838,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -3683,7 +3982,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3828,7 +4126,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -3974,7 +4271,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4124,7 +4420,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4269,7 +4564,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4386,7 +4680,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. 
Args: @@ -4503,7 +4796,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4648,7 +4940,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4791,7 +5082,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py index 8a50343c91..d0de7eb4cf 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -82,6 +82,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncDeploymentResourcePoolServiceRestInterceptor: """Asynchronous Interceptor for DeploymentResourcePoolService. 
@@ -172,12 +175,35 @@ async def post_create_deployment_resource_pool( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_create_deployment_resource_pool` interceptor runs + before the `post_create_deployment_resource_pool_with_metadata` interceptor. """ return response + async def post_create_deployment_resource_pool_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_create_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_create_deployment_resource_pool` interceptor. + When both interceptors are used, this `post_create_deployment_resource_pool_with_metadata` interceptor runs after the + `post_create_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_create_deployment_resource_pool` will be passed to + `post_create_deployment_resource_pool_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_deployment_resource_pool( self, request: deployment_resource_pool_service.DeleteDeploymentResourcePoolRequest, @@ -198,12 +224,35 @@ async def post_delete_deployment_resource_pool( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_delete_deployment_resource_pool` interceptor runs + before the `post_delete_deployment_resource_pool_with_metadata` interceptor. """ return response + async def post_delete_deployment_resource_pool_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_delete_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_delete_deployment_resource_pool` interceptor. + When both interceptors are used, this `post_delete_deployment_resource_pool_with_metadata` interceptor runs after the + `post_delete_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_delete_deployment_resource_pool` will be passed to + `post_delete_deployment_resource_pool_with_metadata`. 
+ """ + return response, metadata + async def pre_get_deployment_resource_pool( self, request: deployment_resource_pool_service.GetDeploymentResourcePoolRequest, @@ -224,12 +273,38 @@ async def post_get_deployment_resource_pool( ) -> deployment_resource_pool.DeploymentResourcePool: """Post-rpc interceptor for get_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_get_deployment_resource_pool` interceptor runs + before the `post_get_deployment_resource_pool_with_metadata` interceptor. """ return response + async def post_get_deployment_resource_pool_with_metadata( + self, + response: deployment_resource_pool.DeploymentResourcePool, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + deployment_resource_pool.DeploymentResourcePool, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_get_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_get_deployment_resource_pool` interceptor. + When both interceptors are used, this `post_get_deployment_resource_pool_with_metadata` interceptor runs after the + `post_get_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_get_deployment_resource_pool` will be passed to + `post_get_deployment_resource_pool_with_metadata`. 
+ """ + return response, metadata + async def pre_list_deployment_resource_pools( self, request: deployment_resource_pool_service.ListDeploymentResourcePoolsRequest, @@ -251,12 +326,38 @@ async def post_list_deployment_resource_pools( ) -> deployment_resource_pool_service.ListDeploymentResourcePoolsResponse: """Post-rpc interceptor for list_deployment_resource_pools - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_deployment_resource_pools_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_list_deployment_resource_pools` interceptor runs + before the `post_list_deployment_resource_pools_with_metadata` interceptor. """ return response + async def post_list_deployment_resource_pools_with_metadata( + self, + response: deployment_resource_pool_service.ListDeploymentResourcePoolsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + deployment_resource_pool_service.ListDeploymentResourcePoolsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_deployment_resource_pools + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_list_deployment_resource_pools_with_metadata` + interceptor in new development instead of the `post_list_deployment_resource_pools` interceptor. + When both interceptors are used, this `post_list_deployment_resource_pools_with_metadata` interceptor runs after the + `post_list_deployment_resource_pools` interceptor. The (possibly modified) response returned by + `post_list_deployment_resource_pools` will be passed to + `post_list_deployment_resource_pools_with_metadata`. 
+ """ + return response, metadata + async def pre_query_deployed_models( self, request: deployment_resource_pool_service.QueryDeployedModelsRequest, @@ -277,12 +378,38 @@ async def post_query_deployed_models( ) -> deployment_resource_pool_service.QueryDeployedModelsResponse: """Post-rpc interceptor for query_deployed_models - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_deployed_models_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_query_deployed_models` interceptor runs + before the `post_query_deployed_models_with_metadata` interceptor. """ return response + async def post_query_deployed_models_with_metadata( + self, + response: deployment_resource_pool_service.QueryDeployedModelsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + deployment_resource_pool_service.QueryDeployedModelsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for query_deployed_models + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_query_deployed_models_with_metadata` + interceptor in new development instead of the `post_query_deployed_models` interceptor. + When both interceptors are used, this `post_query_deployed_models_with_metadata` interceptor runs after the + `post_query_deployed_models` interceptor. The (possibly modified) response returned by + `post_query_deployed_models` will be passed to + `post_query_deployed_models_with_metadata`. 
+ """ + return response, metadata + async def pre_update_deployment_resource_pool( self, request: deployment_resource_pool_service.UpdateDeploymentResourcePoolRequest, @@ -303,12 +430,35 @@ async def post_update_deployment_resource_pool( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_deployment_resource_pool - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_deployment_resource_pool_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DeploymentResourcePoolService server but before - it is returned to user code. + it is returned to user code. This `post_update_deployment_resource_pool` interceptor runs + before the `post_update_deployment_resource_pool_with_metadata` interceptor. """ return response + async def post_update_deployment_resource_pool_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_deployment_resource_pool + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DeploymentResourcePoolService server but before it is returned to user code. + + We recommend only using this `post_update_deployment_resource_pool_with_metadata` + interceptor in new development instead of the `post_update_deployment_resource_pool` interceptor. + When both interceptors are used, this `post_update_deployment_resource_pool_with_metadata` interceptor runs after the + `post_update_deployment_resource_pool` interceptor. The (possibly modified) response returned by + `post_update_deployment_resource_pool` will be passed to + `post_update_deployment_resource_pool_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -621,9 +771,9 @@ def __init__( ) self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -783,11 +933,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseCreateDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseCreateDeploymentResourcePool._get_transcoded_request( http_options, request @@ -810,7 +959,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -857,6 +1006,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -948,11 +1103,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseDeleteDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await 
self._interceptor.pre_delete_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseDeleteDeploymentResourcePool._get_transcoded_request( http_options, request @@ -971,7 +1125,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1017,6 +1171,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1109,11 +1269,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseGetDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_deployment_resource_pool( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseGetDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1178,6 +1337,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) 
if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1272,11 +1437,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseListDeploymentResourcePools._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_deployment_resource_pools( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_deployment_resource_pools( + request, metadata + ) ) transcoded_request = _BaseDeploymentResourcePoolServiceRestTransport._BaseListDeploymentResourcePools._get_transcoded_request( http_options, request @@ -1347,6 +1511,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_deployment_resource_pools(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_deployment_resource_pools_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1506,6 +1676,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_query_deployed_models(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_query_deployed_models_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1600,11 +1774,10 @@ async def __call__( _BaseDeploymentResourcePoolServiceRestTransport._BaseUpdateDeploymentResourcePool._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_deployment_resource_pool( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_deployment_resource_pool( + request, metadata + ) ) transcoded_request = 
_BaseDeploymentResourcePoolServiceRestTransport._BaseUpdateDeploymentResourcePool._get_transcoded_request( http_options, request @@ -1627,7 +1800,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1674,6 +1847,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_deployment_resource_pool(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_deployment_resource_pool_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1869,6 +2048,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2005,6 +2188,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2013,6 +2200,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2053,6 +2248,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2215,6 +2414,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2243,6 +2446,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2355,6 +2562,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2363,6 +2574,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2423,6 +2642,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2585,6 +2808,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2625,6 +2852,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2737,6 +2968,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2745,6 +2980,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2805,6 +3048,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2963,6 +3210,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3003,6 +3254,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3115,6 +3370,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3135,6 +3398,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3183,6 +3450,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3349,6 +3620,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3381,6 +3656,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3489,6 +3768,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3497,6 +3780,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3557,6 +3848,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3671,7 +3966,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -3822,7 +4116,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3973,7 +4266,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4125,7 +4417,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4283,7 +4574,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4436,7 +4726,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -4561,7 +4850,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4684,7 +4972,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4837,7 +5124,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4988,7 +5274,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py index 5c68ca3e0a..2effba5534 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -543,6 +543,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -629,6 +633,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -707,6 +716,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -888,6 +901,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1024,6 +1041,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1032,6 +1053,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1072,6 +1101,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1253,6 +1286,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1281,6 +1318,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1393,6 +1434,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1401,6 +1446,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1461,6 +1514,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1642,6 +1699,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1682,6 +1743,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1794,6 +1859,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1802,6 +1871,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1862,6 +1939,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2039,6 +2120,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2079,6 +2164,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2191,6 +2280,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2211,6 +2308,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2259,6 +2360,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2444,6 +2549,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2476,6 +2585,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2584,6 +2697,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2592,6 +2709,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2652,6 +2777,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py b/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py index 7990d4b872..820efd7010 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 5beb64dfde..f3a2b094ea 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import endpoint @@ -57,9 +56,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import 
google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport from .client import EndpointServiceClient @@ -144,7 +145,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: EndpointServiceAsyncClient: The constructed client. """ - return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + EndpointServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(EndpointServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -160,7 +164,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: EndpointServiceAsyncClient: The constructed client. 
""" - return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + EndpointServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(EndpointServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -303,21 +310,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.EndpointServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.EndpointService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.EndpointService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.EndpointService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.EndpointService", + "credentialsType": None, + } + ), ) async def create_endpoint( @@ -426,7 +435,10 @@ async def sample_create_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, endpoint, endpoint_id]) + flattened_params = [parent, endpoint, endpoint_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -547,7 +559,10 @@ async def sample_get_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -660,7 +675,10 @@ async def sample_list_endpoints(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -791,7 +809,10 @@ async def sample_update_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, update_mask]) + flattened_params = [endpoint, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -916,7 +937,10 @@ async def sample_update_endpoint_long_running(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint]) + flattened_params = [endpoint] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1049,7 +1073,10 @@ async def sample_delete_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1132,7 +1159,6 @@ async def sample_deploy_model(): # Initialize request argument(s) deployed_model = aiplatform_v1.DeployedModel() deployed_model.dedicated_resources.min_replica_count = 1803 - deployed_model.model = "model_value" request = aiplatform_v1.DeployModelRequest( endpoint="endpoint_value", @@ -1211,7 +1237,10 @@ async def sample_deploy_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, deployed_model, traffic_split]) + flattened_params = [endpoint, deployed_model, traffic_split] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1365,7 +1394,10 @@ async def sample_undeploy_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + flattened_params = [endpoint, deployed_model_id, traffic_split] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1436,8 +1468,9 @@ async def mutate_deployed_model( ) -> operation_async.AsyncOperation: r"""Updates an existing deployed model. Updatable fields include ``min_replica_count``, ``max_replica_count``, - ``autoscaling_metric_specs``, ``disable_container_logging`` (v1 - only), and ``enable_container_logging`` (v1beta1 only). + ``required_replica_count``, ``autoscaling_metric_specs``, + ``disable_container_logging`` (v1 only), and + ``enable_container_logging`` (v1beta1 only). .. code-block:: python @@ -1457,7 +1490,6 @@ async def sample_mutate_deployed_model(): # Initialize request argument(s) deployed_model = aiplatform_v1.DeployedModel() deployed_model.dedicated_resources.min_replica_count = 1803 - deployed_model.model = "model_value" request = aiplatform_v1.MutateDeployedModelRequest( endpoint="endpoint_value", @@ -1490,17 +1522,19 @@ async def sample_mutate_deployed_model(): Required. The DeployedModel to be mutated within the Endpoint. 
Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] + - ``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this @@ -1531,7 +1565,10 @@ async def sample_mutate_deployed_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, deployed_model, update_mask]) + flattened_params = [endpoint, deployed_model, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2294,5 +2331,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("EndpointServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index 8185697f63..766cf26442 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import endpoint @@ -71,9 +72,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import EndpointServiceGrpcTransport from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport @@ -173,6 +176,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should 
be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -467,12 +498,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = EndpointServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -480,7 +507,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. 
client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -512,20 +539,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = EndpointServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -618,6 +639,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -708,11 +756,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = EndpointServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + EndpointServiceClient._read_environment_variables() + ) self._client_cert_source = EndpointServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -824,21 +870,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.EndpointServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.EndpointService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.EndpointService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.EndpointService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + 
"credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.EndpointService", + "credentialsType": None, + } + ), ) def create_endpoint( @@ -947,7 +997,10 @@ def sample_create_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint, endpoint_id]) + flattened_params = [parent, endpoint, endpoint_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1065,7 +1118,10 @@ def sample_get_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1175,7 +1231,10 @@ def sample_list_endpoints(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1303,7 +1362,10 @@ def sample_update_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, update_mask]) + flattened_params = [endpoint, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1425,7 +1487,10 @@ def sample_update_endpoint_long_running(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint]) + flattened_params = [endpoint] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1557,7 +1622,10 @@ def sample_delete_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1637,7 +1705,6 @@ def sample_deploy_model(): # Initialize request argument(s) deployed_model = aiplatform_v1.DeployedModel() deployed_model.dedicated_resources.min_replica_count = 1803 - deployed_model.model = "model_value" request = aiplatform_v1.DeployModelRequest( endpoint="endpoint_value", @@ -1716,7 +1783,10 @@ def sample_deploy_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, traffic_split]) + flattened_params = [endpoint, deployed_model, traffic_split] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1866,7 +1936,10 @@ def sample_undeploy_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + flattened_params = [endpoint, deployed_model_id, traffic_split] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1933,8 +2006,9 @@ def mutate_deployed_model( ) -> gac_operation.Operation: r"""Updates an existing deployed model. 
Updatable fields include ``min_replica_count``, ``max_replica_count``, - ``autoscaling_metric_specs``, ``disable_container_logging`` (v1 - only), and ``enable_container_logging`` (v1beta1 only). + ``required_replica_count``, ``autoscaling_metric_specs``, + ``disable_container_logging`` (v1 only), and + ``enable_container_logging`` (v1beta1 only). .. code-block:: python @@ -1954,7 +2028,6 @@ def sample_mutate_deployed_model(): # Initialize request argument(s) deployed_model = aiplatform_v1.DeployedModel() deployed_model.dedicated_resources.min_replica_count = 1803 - deployed_model.model = "model_value" request = aiplatform_v1.MutateDeployedModelRequest( endpoint="endpoint_value", @@ -1987,17 +2060,19 @@ def sample_mutate_deployed_model(): Required. The DeployedModel to be mutated within the Endpoint. Only the following fields can be mutated: - - ``min_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - ``max_replica_count`` in either - [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] - or - [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] - - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] - - ``disable_container_logging`` (v1 only) - - ``enable_container_logging`` (v1beta1 only) + - ``min_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``max_replica_count`` in either + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + or + [AutomaticResources][google.cloud.aiplatform.v1.AutomaticResources] + - ``required_replica_count`` in + [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources] + - [autoscaling_metric_specs][google.cloud.aiplatform.v1.DedicatedResources.autoscaling_metric_specs] + - 
``disable_container_logging`` (v1 only) + - ``enable_container_logging`` (v1beta1 only) This corresponds to the ``deployed_model`` field on the ``request`` instance; if ``request`` is provided, this @@ -2028,7 +2103,10 @@ def sample_mutate_deployed_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, update_mask]) + flattened_params = [endpoint, deployed_model, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2137,16 +2215,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -2192,16 +2274,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -2364,16 +2450,20 @@ def wait_operation( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -2485,16 +2575,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2607,16 +2701,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2667,16 +2765,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2722,16 +2824,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2777,21 +2883,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("EndpointServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py index 68933ca1ea..4bd48bf3a7 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py index bbc4b02b9c..77d57ff6c3 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py index d74b91774b..901393d915 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import endpoint from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint @@ -39,6 +40,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class EndpointServiceTransport(abc.ABC): """Abstract transport class for EndpointService.""" @@ -70,9 +74,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
@@ -85,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -101,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -355,13 +363,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py index 7e9bad2b07..e083f48443 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -77,12 +77,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.EndpointService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -159,9 +158,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -295,9 +295,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -472,12 +473,12 @@ def update_endpoint_long_running( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_endpoint_long_running" not in self._stubs: - self._stubs[ - "update_endpoint_long_running" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.EndpointService/UpdateEndpointLongRunning", - request_serializer=endpoint_service.UpdateEndpointLongRunningRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_endpoint_long_running"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.EndpointService/UpdateEndpointLongRunning", + request_serializer=endpoint_service.UpdateEndpointLongRunningRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_endpoint_long_running"] @@ -572,8 +573,9 @@ def mutate_deployed_model( Updates an existing deployed model. Updatable fields include ``min_replica_count``, ``max_replica_count``, - ``autoscaling_metric_specs``, ``disable_container_logging`` (v1 - only), and ``enable_container_logging`` (v1beta1 only). + ``required_replica_count``, ``autoscaling_metric_specs``, + ``disable_container_logging`` (v1 only), and + ``enable_container_logging`` (v1beta1 only). 
Returns: Callable[[~.MutateDeployedModelRequest], diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py index f56c525b8a..ed45bd1701 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -155,8 +155,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -207,9 +208,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -485,12 +487,12 @@ def update_endpoint_long_running( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_endpoint_long_running" not in self._stubs: - self._stubs[ - "update_endpoint_long_running" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.EndpointService/UpdateEndpointLongRunning", - request_serializer=endpoint_service.UpdateEndpointLongRunningRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_endpoint_long_running"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.EndpointService/UpdateEndpointLongRunning", + request_serializer=endpoint_service.UpdateEndpointLongRunningRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_endpoint_long_running"] @@ -592,8 +594,9 @@ def mutate_deployed_model( Updates an existing deployed model. Updatable fields include ``min_replica_count``, ``max_replica_count``, - ``autoscaling_metric_specs``, ``disable_container_logging`` (v1 - only), and ``enable_container_logging`` (v1beta1 only). + ``required_replica_count``, ``autoscaling_metric_specs``, + ``disable_container_logging`` (v1 only), and + ``enable_container_logging`` (v1beta1 only). 
Returns: Callable[[~.MutateDeployedModelRequest], diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py index 4ec406ce3b..2f0afe4e52 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -65,6 +66,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class EndpointServiceRestInterceptor: """Interceptor for EndpointService. @@ -178,12 +182,35 @@ def post_create_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_create_endpoint` interceptor runs + before the `post_create_endpoint_with_metadata` interceptor. 
""" return response + def post_create_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_create_endpoint_with_metadata` + interceptor in new development instead of the `post_create_endpoint` interceptor. + When both interceptors are used, this `post_create_endpoint_with_metadata` interceptor runs after the + `post_create_endpoint` interceptor. The (possibly modified) response returned by + `post_create_endpoint` will be passed to + `post_create_endpoint_with_metadata`. + """ + return response, metadata + def pre_delete_endpoint( self, request: endpoint_service.DeleteEndpointRequest, @@ -203,12 +230,35 @@ def post_delete_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_delete_endpoint` interceptor runs + before the `post_delete_endpoint_with_metadata` interceptor. 
""" return response + def post_delete_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_delete_endpoint_with_metadata` + interceptor in new development instead of the `post_delete_endpoint` interceptor. + When both interceptors are used, this `post_delete_endpoint_with_metadata` interceptor runs after the + `post_delete_endpoint` interceptor. The (possibly modified) response returned by + `post_delete_endpoint` will be passed to + `post_delete_endpoint_with_metadata`. + """ + return response, metadata + def pre_deploy_model( self, request: endpoint_service.DeployModelRequest, @@ -228,12 +278,35 @@ def post_deploy_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for deploy_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_deploy_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_deploy_model` interceptor runs + before the `post_deploy_model_with_metadata` interceptor. """ return response + def post_deploy_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for deploy_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. 
+ + We recommend only using this `post_deploy_model_with_metadata` + interceptor in new development instead of the `post_deploy_model` interceptor. + When both interceptors are used, this `post_deploy_model_with_metadata` interceptor runs after the + `post_deploy_model` interceptor. The (possibly modified) response returned by + `post_deploy_model` will be passed to + `post_deploy_model_with_metadata`. + """ + return response, metadata + def pre_get_endpoint( self, request: endpoint_service.GetEndpointRequest, @@ -251,12 +324,35 @@ def pre_get_endpoint( def post_get_endpoint(self, response: endpoint.Endpoint) -> endpoint.Endpoint: """Post-rpc interceptor for get_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_get_endpoint` interceptor runs + before the `post_get_endpoint_with_metadata` interceptor. """ return response + def post_get_endpoint_with_metadata( + self, + response: endpoint.Endpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[endpoint.Endpoint, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_get_endpoint_with_metadata` + interceptor in new development instead of the `post_get_endpoint` interceptor. + When both interceptors are used, this `post_get_endpoint_with_metadata` interceptor runs after the + `post_get_endpoint` interceptor. The (possibly modified) response returned by + `post_get_endpoint` will be passed to + `post_get_endpoint_with_metadata`. 
+ """ + return response, metadata + def pre_list_endpoints( self, request: endpoint_service.ListEndpointsRequest, @@ -276,12 +372,37 @@ def post_list_endpoints( ) -> endpoint_service.ListEndpointsResponse: """Post-rpc interceptor for list_endpoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_endpoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_list_endpoints` interceptor runs + before the `post_list_endpoints_with_metadata` interceptor. """ return response + def post_list_endpoints_with_metadata( + self, + response: endpoint_service.ListEndpointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + endpoint_service.ListEndpointsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_endpoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_list_endpoints_with_metadata` + interceptor in new development instead of the `post_list_endpoints` interceptor. + When both interceptors are used, this `post_list_endpoints_with_metadata` interceptor runs after the + `post_list_endpoints` interceptor. The (possibly modified) response returned by + `post_list_endpoints` will be passed to + `post_list_endpoints_with_metadata`. + """ + return response, metadata + def pre_mutate_deployed_model( self, request: endpoint_service.MutateDeployedModelRequest, @@ -302,12 +423,35 @@ def post_mutate_deployed_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for mutate_deployed_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_deployed_model_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_mutate_deployed_model` interceptor runs + before the `post_mutate_deployed_model_with_metadata` interceptor. """ return response + def post_mutate_deployed_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for mutate_deployed_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_mutate_deployed_model_with_metadata` + interceptor in new development instead of the `post_mutate_deployed_model` interceptor. + When both interceptors are used, this `post_mutate_deployed_model_with_metadata` interceptor runs after the + `post_mutate_deployed_model` interceptor. The (possibly modified) response returned by + `post_mutate_deployed_model` will be passed to + `post_mutate_deployed_model_with_metadata`. + """ + return response, metadata + def pre_undeploy_model( self, request: endpoint_service.UndeployModelRequest, @@ -327,12 +471,35 @@ def post_undeploy_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for undeploy_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_undeploy_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_undeploy_model` interceptor runs + before the `post_undeploy_model_with_metadata` interceptor. 
""" return response + def post_undeploy_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for undeploy_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_undeploy_model_with_metadata` + interceptor in new development instead of the `post_undeploy_model` interceptor. + When both interceptors are used, this `post_undeploy_model_with_metadata` interceptor runs after the + `post_undeploy_model` interceptor. The (possibly modified) response returned by + `post_undeploy_model` will be passed to + `post_undeploy_model_with_metadata`. + """ + return response, metadata + def pre_update_endpoint( self, request: endpoint_service.UpdateEndpointRequest, @@ -352,12 +519,35 @@ def post_update_endpoint( ) -> gca_endpoint.Endpoint: """Post-rpc interceptor for update_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_update_endpoint` interceptor runs + before the `post_update_endpoint_with_metadata` interceptor. """ return response + def post_update_endpoint_with_metadata( + self, + response: gca_endpoint.Endpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_endpoint.Endpoint, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. 
+ + We recommend only using this `post_update_endpoint_with_metadata` + interceptor in new development instead of the `post_update_endpoint` interceptor. + When both interceptors are used, this `post_update_endpoint_with_metadata` interceptor runs after the + `post_update_endpoint` interceptor. The (possibly modified) response returned by + `post_update_endpoint` will be passed to + `post_update_endpoint_with_metadata`. + """ + return response, metadata + def pre_update_endpoint_long_running( self, request: endpoint_service.UpdateEndpointLongRunningRequest, @@ -378,12 +568,35 @@ def post_update_endpoint_long_running( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_endpoint_long_running - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_endpoint_long_running_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_update_endpoint_long_running` interceptor runs + before the `post_update_endpoint_long_running_with_metadata` interceptor. """ return response + def post_update_endpoint_long_running_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_endpoint_long_running + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_update_endpoint_long_running_with_metadata` + interceptor in new development instead of the `post_update_endpoint_long_running` interceptor. 
+ When both interceptors are used, this `post_update_endpoint_long_running_with_metadata` interceptor runs after the + `post_update_endpoint_long_running` interceptor. The (possibly modified) response returned by + `post_update_endpoint_long_running` will be passed to + `post_update_endpoint_long_running_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -673,9 +886,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -886,6 +1100,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1022,6 +1240,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1030,6 +1252,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1070,6 +1300,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1232,6 +1466,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + 
{ + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1260,6 +1498,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1372,6 +1614,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1380,6 +1626,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1440,6 +1694,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1602,6 +1860,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1642,6 +1904,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1754,6 +2020,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1762,6 +2032,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1822,6 +2100,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -1980,6 +2262,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2020,6 +2306,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2132,6 +2422,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2152,6 +2450,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2200,6 +2502,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2366,6 +2672,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2398,6 +2708,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2506,6 +2820,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2514,6 +2832,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2574,6 +2900,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2679,7 +3009,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2719,6 +3049,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2823,7 +3157,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2862,6 +3196,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2971,7 +3309,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3011,6 +3349,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_deploy_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_deploy_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG ): # pragma: NO COVER @@ -3156,6 +3498,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3300,6 +3646,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_endpoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_endpoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3414,7 +3764,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3454,6 +3804,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_mutate_deployed_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_mutate_deployed_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3563,7 +3917,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3603,6 +3957,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_undeploy_model(resp) + response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] + resp, _ = self._interceptor.post_undeploy_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3754,6 +4112,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3867,7 +4229,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3909,6 +4271,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_endpoint_long_running(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_endpoint_long_running_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4051,7 +4417,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4193,7 +4558,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4335,7 +4699,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. 
Args: @@ -4478,7 +4841,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4626,7 +4988,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4770,7 +5131,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4886,7 +5246,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5002,7 +5361,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -5144,7 +5502,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5286,7 +5643,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py index e0af0b0040..46352cdb44 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -83,6 +83,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncEndpointServiceRestInterceptor: """Asynchronous Interceptor for EndpointService. @@ -196,12 +199,35 @@ async def post_create_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_create_endpoint` interceptor runs + before the `post_create_endpoint_with_metadata` interceptor. 
""" return response + async def post_create_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_create_endpoint_with_metadata` + interceptor in new development instead of the `post_create_endpoint` interceptor. + When both interceptors are used, this `post_create_endpoint_with_metadata` interceptor runs after the + `post_create_endpoint` interceptor. The (possibly modified) response returned by + `post_create_endpoint` will be passed to + `post_create_endpoint_with_metadata`. + """ + return response, metadata + async def pre_delete_endpoint( self, request: endpoint_service.DeleteEndpointRequest, @@ -221,12 +247,35 @@ async def post_delete_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_delete_endpoint` interceptor runs + before the `post_delete_endpoint_with_metadata` interceptor. 
""" return response + async def post_delete_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_delete_endpoint_with_metadata` + interceptor in new development instead of the `post_delete_endpoint` interceptor. + When both interceptors are used, this `post_delete_endpoint_with_metadata` interceptor runs after the + `post_delete_endpoint` interceptor. The (possibly modified) response returned by + `post_delete_endpoint` will be passed to + `post_delete_endpoint_with_metadata`. + """ + return response, metadata + async def pre_deploy_model( self, request: endpoint_service.DeployModelRequest, @@ -246,12 +295,35 @@ async def post_deploy_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for deploy_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_deploy_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_deploy_model` interceptor runs + before the `post_deploy_model_with_metadata` interceptor. 
""" return response + async def post_deploy_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for deploy_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_deploy_model_with_metadata` + interceptor in new development instead of the `post_deploy_model` interceptor. + When both interceptors are used, this `post_deploy_model_with_metadata` interceptor runs after the + `post_deploy_model` interceptor. The (possibly modified) response returned by + `post_deploy_model` will be passed to + `post_deploy_model_with_metadata`. + """ + return response, metadata + async def pre_get_endpoint( self, request: endpoint_service.GetEndpointRequest, @@ -269,12 +341,35 @@ async def pre_get_endpoint( async def post_get_endpoint(self, response: endpoint.Endpoint) -> endpoint.Endpoint: """Post-rpc interceptor for get_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_get_endpoint` interceptor runs + before the `post_get_endpoint_with_metadata` interceptor. 
""" return response + async def post_get_endpoint_with_metadata( + self, + response: endpoint.Endpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[endpoint.Endpoint, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_get_endpoint_with_metadata` + interceptor in new development instead of the `post_get_endpoint` interceptor. + When both interceptors are used, this `post_get_endpoint_with_metadata` interceptor runs after the + `post_get_endpoint` interceptor. The (possibly modified) response returned by + `post_get_endpoint` will be passed to + `post_get_endpoint_with_metadata`. + """ + return response, metadata + async def pre_list_endpoints( self, request: endpoint_service.ListEndpointsRequest, @@ -294,12 +389,37 @@ async def post_list_endpoints( ) -> endpoint_service.ListEndpointsResponse: """Post-rpc interceptor for list_endpoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_endpoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_list_endpoints` interceptor runs + before the `post_list_endpoints_with_metadata` interceptor. 
""" return response + async def post_list_endpoints_with_metadata( + self, + response: endpoint_service.ListEndpointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + endpoint_service.ListEndpointsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_endpoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_list_endpoints_with_metadata` + interceptor in new development instead of the `post_list_endpoints` interceptor. + When both interceptors are used, this `post_list_endpoints_with_metadata` interceptor runs after the + `post_list_endpoints` interceptor. The (possibly modified) response returned by + `post_list_endpoints` will be passed to + `post_list_endpoints_with_metadata`. + """ + return response, metadata + async def pre_mutate_deployed_model( self, request: endpoint_service.MutateDeployedModelRequest, @@ -320,12 +440,35 @@ async def post_mutate_deployed_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for mutate_deployed_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_deployed_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_mutate_deployed_model` interceptor runs + before the `post_mutate_deployed_model_with_metadata` interceptor. 
""" return response + async def post_mutate_deployed_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for mutate_deployed_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_mutate_deployed_model_with_metadata` + interceptor in new development instead of the `post_mutate_deployed_model` interceptor. + When both interceptors are used, this `post_mutate_deployed_model_with_metadata` interceptor runs after the + `post_mutate_deployed_model` interceptor. The (possibly modified) response returned by + `post_mutate_deployed_model` will be passed to + `post_mutate_deployed_model_with_metadata`. + """ + return response, metadata + async def pre_undeploy_model( self, request: endpoint_service.UndeployModelRequest, @@ -345,12 +488,35 @@ async def post_undeploy_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for undeploy_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_undeploy_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_undeploy_model` interceptor runs + before the `post_undeploy_model_with_metadata` interceptor. 
""" return response + async def post_undeploy_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for undeploy_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_undeploy_model_with_metadata` + interceptor in new development instead of the `post_undeploy_model` interceptor. + When both interceptors are used, this `post_undeploy_model_with_metadata` interceptor runs after the + `post_undeploy_model` interceptor. The (possibly modified) response returned by + `post_undeploy_model` will be passed to + `post_undeploy_model_with_metadata`. + """ + return response, metadata + async def pre_update_endpoint( self, request: endpoint_service.UpdateEndpointRequest, @@ -370,12 +536,35 @@ async def post_update_endpoint( ) -> gca_endpoint.Endpoint: """Post-rpc interceptor for update_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_update_endpoint` interceptor runs + before the `post_update_endpoint_with_metadata` interceptor. 
""" return response + async def post_update_endpoint_with_metadata( + self, + response: gca_endpoint.Endpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_endpoint.Endpoint, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_update_endpoint_with_metadata` + interceptor in new development instead of the `post_update_endpoint` interceptor. + When both interceptors are used, this `post_update_endpoint_with_metadata` interceptor runs after the + `post_update_endpoint` interceptor. The (possibly modified) response returned by + `post_update_endpoint` will be passed to + `post_update_endpoint_with_metadata`. + """ + return response, metadata + async def pre_update_endpoint_long_running( self, request: endpoint_service.UpdateEndpointLongRunningRequest, @@ -396,12 +585,35 @@ async def post_update_endpoint_long_running( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_endpoint_long_running - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_endpoint_long_running_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EndpointService server but before - it is returned to user code. + it is returned to user code. This `post_update_endpoint_long_running` interceptor runs + before the `post_update_endpoint_long_running_with_metadata` interceptor. 
""" return response + async def post_update_endpoint_long_running_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_endpoint_long_running + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EndpointService server but before it is returned to user code. + + We recommend only using this `post_update_endpoint_long_running_with_metadata` + interceptor in new development instead of the `post_update_endpoint_long_running` interceptor. + When both interceptors are used, this `post_update_endpoint_long_running_with_metadata` interceptor runs after the + `post_update_endpoint_long_running` interceptor. The (possibly modified) response returned by + `post_update_endpoint_long_running` will be passed to + `post_update_endpoint_long_running_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -710,9 +922,9 @@ def __init__( self._interceptor = interceptor or AsyncEndpointServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -908,7 +1120,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -957,6 +1169,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_create_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1065,7 +1281,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1113,6 +1329,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1225,7 +1445,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1274,6 +1494,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_deploy_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_deploy_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1429,6 +1653,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await 
self._interceptor.post_get_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1584,6 +1812,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_endpoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_endpoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1699,7 +1931,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1746,6 +1978,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_mutate_deployed_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_mutate_deployed_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1859,7 +2095,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1908,6 +2144,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_undeploy_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_undeploy_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2070,6 +2310,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2160,11 +2404,10 @@ async def __call__( _BaseEndpointServiceRestTransport._BaseUpdateEndpointLongRunning._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_endpoint_long_running( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_endpoint_long_running( + request, metadata + ) ) transcoded_request = _BaseEndpointServiceRestTransport._BaseUpdateEndpointLongRunning._get_transcoded_request( http_options, request @@ -2187,7 +2430,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2234,6 +2477,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_endpoint_long_running(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_endpoint_long_running_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2429,6 +2678,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2565,6 +2818,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2573,6 +2830,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2613,6 +2878,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2775,6 +3044,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2803,6 +3076,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2915,6 +3192,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2923,6 +3204,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2983,6 +3272,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3145,6 +3438,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -3185,6 +3482,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -3297,6 +3598,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3305,6 +3610,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3365,6 +3678,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3523,6 +3840,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3563,6 +3884,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3675,6 +4000,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3695,6 +4028,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3743,6 +4080,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3909,6 +4250,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3941,6 +4286,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -4049,6 +4398,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -4057,6 +4410,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4117,6 +4478,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -4236,7 +4601,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4389,7 +4753,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. 
Args: @@ -4542,7 +4905,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4696,7 +5058,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4854,7 +5215,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -5005,7 +5365,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -5130,7 +5489,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5255,7 +5613,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -5408,7 +5765,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5561,7 +5917,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py index 3834bd326d..3b21ab9c2b 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -701,6 +701,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -787,6 +791,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -865,6 +874,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1046,6 +1059,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1182,6 +1199,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1190,6 +1211,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1230,6 +1259,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1411,6 +1444,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1439,6 +1476,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1551,6 +1592,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1559,6 +1604,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1619,6 +1672,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1800,6 +1857,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1840,6 +1901,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1952,6 +2017,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1960,6 +2029,14 @@ def 
_get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2020,6 +2097,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2197,6 +2278,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2237,6 +2322,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2349,6 +2438,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2369,6 +2466,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + 
"uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2417,6 +2518,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2602,6 +2707,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2634,6 +2743,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2742,6 +2855,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2750,6 +2867,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", 
@@ -2810,6 +2935,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/__init__.py b/google/cloud/aiplatform_v1/services/evaluation_service/__init__.py index b09b156cce..f2bfe018de 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py b/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py index 8a78009556..b814320cc0 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -113,7 +114,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: EvaluationServiceAsyncClient: The constructed client. 
""" - return EvaluationServiceClient.from_service_account_info.__func__(EvaluationServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + EvaluationServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(EvaluationServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -129,7 +133,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: EvaluationServiceAsyncClient: The constructed client. """ - return EvaluationServiceClient.from_service_account_file.__func__(EvaluationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + EvaluationServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(EvaluationServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -274,21 +281,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.EvaluationServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.EvaluationService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.EvaluationService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.EvaluationService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + 
self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.EvaluationService", + "credentialsType": None, + } + ), ) async def evaluate_instances( @@ -1087,5 +1096,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("EvaluationServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/client.py b/google/cloud/aiplatform_v1/services/evaluation_service/client.py index 0dc5d30f5e..123704dbb7 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/client.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -162,6 +165,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -327,12 +358,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = EvaluationServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -340,7 +367,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -372,20 +399,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = EvaluationServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -478,6 +499,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -570,11 +618,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = EvaluationServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + EvaluationServiceClient._read_environment_variables() + ) self._client_cert_source = EvaluationServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -687,21 +733,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.EvaluationServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.EvaluationService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.EvaluationService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.EvaluationService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.EvaluationService", + "credentialsType": None, + } + ), ) def evaluate_instances( @@ -847,16 +897,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -902,16 +956,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1074,16 +1132,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1195,16 +1257,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1317,16 +1383,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1377,16 +1447,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1432,16 +1506,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1487,21 +1565,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("EvaluationServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/__init__.py index b4caca6f07..65d8722e27 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py index 0f6f762c0d..ea412bc6f7 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import evaluation_service from google.cloud.location import locations_pb2 # type: ignore @@ -36,6 +37,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class EvaluationServiceTransport(abc.ABC): """Abstract transport class for EvaluationService.""" @@ -67,9 +71,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -82,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -98,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -235,13 +243,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc.py index d2d283ae18..d21fd97cf0 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -74,12 +74,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.EvaluationService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -156,9 +155,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -291,9 +291,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc_asyncio.py index a9793b26e3..95d538596c 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -152,8 +152,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -204,9 +205,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. 
+ This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest.py index bb24ccdc1f..b45c38f245 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -62,6 +63,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class EvaluationServiceRestInterceptor: """Interceptor for EvaluationService. @@ -112,12 +116,38 @@ def post_evaluate_instances( ) -> evaluation_service.EvaluateInstancesResponse: """Post-rpc interceptor for evaluate_instances - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_evaluate_instances_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EvaluationService server but before - it is returned to user code. + it is returned to user code. 
This `post_evaluate_instances` interceptor runs + before the `post_evaluate_instances_with_metadata` interceptor. """ return response + def post_evaluate_instances_with_metadata( + self, + response: evaluation_service.EvaluateInstancesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + evaluation_service.EvaluateInstancesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for evaluate_instances + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EvaluationService server but before it is returned to user code. + + We recommend only using this `post_evaluate_instances_with_metadata` + interceptor in new development instead of the `post_evaluate_instances` interceptor. + When both interceptors are used, this `post_evaluate_instances_with_metadata` interceptor runs after the + `post_evaluate_instances` interceptor. The (possibly modified) response returned by + `post_evaluate_instances` will be passed to + `post_evaluate_instances_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -407,9 +437,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -578,6 +609,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_evaluate_instances(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_evaluate_instances_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -655,7 +690,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -798,7 +832,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -940,7 +973,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1083,7 +1115,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1231,7 +1262,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1376,7 +1406,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1493,7 +1522,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. 
Args: @@ -1609,7 +1637,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1752,7 +1779,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -1895,7 +1921,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_asyncio.py index c85eee6302..5d2d4dbafa 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,7 +36,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -79,6 +79,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncEvaluationServiceRestInterceptor: """Asynchronous Interceptor for EvaluationService. 
@@ -129,12 +132,38 @@ async def post_evaluate_instances( ) -> evaluation_service.EvaluateInstancesResponse: """Post-rpc interceptor for evaluate_instances - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_evaluate_instances_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the EvaluationService server but before - it is returned to user code. + it is returned to user code. This `post_evaluate_instances` interceptor runs + before the `post_evaluate_instances_with_metadata` interceptor. """ return response + async def post_evaluate_instances_with_metadata( + self, + response: evaluation_service.EvaluateInstancesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + evaluation_service.EvaluateInstancesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for evaluate_instances + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the EvaluationService server but before it is returned to user code. + + We recommend only using this `post_evaluate_instances_with_metadata` + interceptor in new development instead of the `post_evaluate_instances` interceptor. + When both interceptors are used, this `post_evaluate_instances_with_metadata` interceptor runs after the + `post_evaluate_instances` interceptor. The (possibly modified) response returned by + `post_evaluate_instances` will be passed to + `post_evaluate_instances_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -644,6 +673,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_evaluate_instances(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_evaluate_instances_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -721,7 +754,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -874,7 +906,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1027,7 +1058,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1181,7 +1211,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1339,7 +1368,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1490,7 +1518,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -1613,7 +1640,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1736,7 +1762,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1889,7 +1914,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2042,7 +2066,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py index d3af380219..82c168b089 100644 --- a/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/evaluation_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -271,6 +271,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -357,6 +361,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -435,6 +444,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -616,6 +629,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -752,6 +769,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -760,6 +781,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -800,6 +829,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -981,6 +1014,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1009,6 +1046,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1121,6 +1162,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1129,6 +1174,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1189,6 +1242,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1370,6 +1427,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1410,6 +1471,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1522,6 +1587,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1530,6 +1599,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1590,6 +1667,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1767,6 +1848,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1807,6 +1892,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1919,6 +2008,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -1939,6 +2036,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -1987,6 +2088,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2172,6 +2277,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2204,6 +2313,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2312,6 +2425,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2320,6 +2437,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2380,6 +2505,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/__init__.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/__init__.py index 0f06c4bd84..73ae18693f 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py index 5a776f909f..68b1202243 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_online_store_admin_service import ( pagers, ) @@ -63,11 +62,13 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import interval_pb2 # type: 
ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.interval_pb2 as interval_pb2 # type: ignore from .transports.base import ( FeatureOnlineStoreAdminServiceTransport, DEFAULT_CLIENT_INFO, @@ -163,7 +164,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeatureOnlineStoreAdminServiceAsyncClient: The constructed client. """ - return FeatureOnlineStoreAdminServiceClient.from_service_account_info.__func__(FeatureOnlineStoreAdminServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeatureOnlineStoreAdminServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + FeatureOnlineStoreAdminServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -179,7 +185,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeatureOnlineStoreAdminServiceAsyncClient: The constructed client. 
""" - return FeatureOnlineStoreAdminServiceClient.from_service_account_file.__func__(FeatureOnlineStoreAdminServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeatureOnlineStoreAdminServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeatureOnlineStoreAdminServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file @@ -324,21 +335,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeatureOnlineStoreAdminServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", + "credentialsType": None, + } + ), ) async def create_feature_online_store( @@ -451,8 +464,9 @@ async def sample_create_feature_online_store(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, feature_online_store, feature_online_store_id] + flattened_params = [parent, feature_online_store, feature_online_store_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -584,7 +598,10 @@ async def sample_get_feature_online_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -706,7 +723,10 @@ async def sample_list_feature_online_stores(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -842,11 +862,11 @@ async def sample_update_feature_online_store(): Updatable fields: - - ``labels`` - - ``description`` - - ``bigtable`` - - ``bigtable.auto_scaling`` - - ``bigtable.enable_multi_region_replica`` + - ``labels`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -871,7 +891,10 @@ async def sample_update_feature_online_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_online_store, update_mask]) + flattened_params = [feature_online_store, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1028,7 +1051,10 @@ async def sample_delete_feature_online_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1189,7 +1215,10 @@ async def sample_create_feature_view(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature_view, feature_view_id]) + flattened_params = [parent, feature_view, feature_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1316,7 +1345,10 @@ async def sample_get_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1433,7 +1465,10 @@ async def sample_list_feature_views(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1563,16 +1598,16 @@ async def sample_update_feature_view(): Updatable fields: - - ``labels`` - - ``service_agent_type`` - - ``big_query_source`` - - ``big_query_source.uri`` - - ``big_query_source.entity_id_columns`` - - ``feature_registry_source`` - - ``feature_registry_source.feature_groups`` - - ``sync_config`` - - ``sync_config.cron`` - - ``optimized_config.automatic_resources`` + - ``labels`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` + - ``optimized_config.automatic_resources`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1596,7 +1631,10 @@ async def sample_update_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_view, update_mask]) + flattened_params = [feature_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1737,7 +1775,10 @@ async def sample_delete_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1859,7 +1900,10 @@ async def sample_sync_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_view]) + flattened_params = [feature_view] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1976,7 +2020,10 @@ async def sample_get_feature_view_sync(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2095,7 +2142,10 @@ async def sample_list_feature_view_syncs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2861,5 +2911,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("FeatureOnlineStoreAdminServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py index 6b72e2823d..df47e201ba 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_online_store_admin_service import ( pagers, ) @@ -77,11 +78,13 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import interval_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.interval_pb2 as interval_pb2 # type: ignore from .transports.base import ( FeatureOnlineStoreAdminServiceTransport, DEFAULT_CLIENT_INFO, @@ -113,14 +116,14 @@ class FeatureOnlineStoreAdminServiceClientMeta(type): OrderedDict() ) # type: Dict[str, Type[FeatureOnlineStoreAdminServiceTransport]] _transport_registry["grpc"] = FeatureOnlineStoreAdminServiceGrpcTransport - 
_transport_registry[ - "grpc_asyncio" - ] = FeatureOnlineStoreAdminServiceGrpcAsyncIOTransport + _transport_registry["grpc_asyncio"] = ( + FeatureOnlineStoreAdminServiceGrpcAsyncIOTransport + ) _transport_registry["rest"] = FeatureOnlineStoreAdminServiceRestTransport if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncFeatureOnlineStoreAdminServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncFeatureOnlineStoreAdminServiceRestTransport + ) def get_transport_class( cls, @@ -194,6 +197,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -429,12 +460,10 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = ( + FeatureOnlineStoreAdminServiceClient._use_client_cert_effective() + ) use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -442,7 +471,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -474,20 +503,16 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = ( + FeatureOnlineStoreAdminServiceClient._use_client_cert_effective() + ) use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -582,6 +607,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -674,11 +726,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = FeatureOnlineStoreAdminServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + FeatureOnlineStoreAdminServiceClient._read_environment_variables() + ) self._client_cert_source = ( FeatureOnlineStoreAdminServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert @@ -799,21 +849,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeatureOnlineStoreAdminServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", + "credentialsType": None, + } + ), ) def create_feature_online_store( @@ -926,8 +980,9 @@ def sample_create_feature_online_store(): # Create or coerce a 
protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, feature_online_store, feature_online_store_id] + flattened_params = [parent, feature_online_store, feature_online_store_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1058,7 +1113,10 @@ def sample_get_feature_online_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1177,7 +1235,10 @@ def sample_list_feature_online_stores(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1312,11 +1373,11 @@ def sample_update_feature_online_store(): Updatable fields: - - ``labels`` - - ``description`` - - ``bigtable`` - - ``bigtable.auto_scaling`` - - ``bigtable.enable_multi_region_replica`` + - ``labels`` + - ``description`` + - ``bigtable`` + - ``bigtable.auto_scaling`` + - ``bigtable.enable_multi_region_replica`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1341,7 +1402,10 @@ def sample_update_feature_online_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_online_store, update_mask]) + flattened_params = [feature_online_store, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1497,7 +1561,10 @@ def sample_delete_feature_online_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1657,7 +1724,10 @@ def sample_create_feature_view(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature_view, feature_view_id]) + flattened_params = [parent, feature_view, feature_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1781,7 +1851,10 @@ def sample_get_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1895,7 +1968,10 @@ def sample_list_feature_views(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2022,16 +2098,16 @@ def sample_update_feature_view(): Updatable fields: - - ``labels`` - - ``service_agent_type`` - - ``big_query_source`` - - ``big_query_source.uri`` - - ``big_query_source.entity_id_columns`` - - ``feature_registry_source`` - - ``feature_registry_source.feature_groups`` - - ``sync_config`` - - ``sync_config.cron`` - - ``optimized_config.automatic_resources`` + - ``labels`` + - ``service_agent_type`` + - ``big_query_source`` + - ``big_query_source.uri`` + - ``big_query_source.entity_id_columns`` + - ``feature_registry_source`` + - ``feature_registry_source.feature_groups`` + - ``sync_config`` + - ``sync_config.cron`` + - ``optimized_config.automatic_resources`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2055,7 +2131,10 @@ def sample_update_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_view, update_mask]) + flattened_params = [feature_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2193,7 +2272,10 @@ def sample_delete_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2312,7 +2394,10 @@ def sample_sync_feature_view(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_view]) + flattened_params = [feature_view] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2426,7 +2511,10 @@ def sample_get_feature_view_sync(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2542,7 +2630,10 @@ def sample_list_feature_view_syncs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2654,16 +2745,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -2709,16 +2804,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -2881,16 +2980,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -3002,16 +3105,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -3124,16 +3231,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -3184,16 +3295,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -3239,16 +3354,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -3294,21 +3413,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("FeatureOnlineStoreAdminServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py index c32523d9d2..2a34fd35e0 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/__init__.py index 063abfa298..31e263b75e 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -45,9 +45,9 @@ _transport_registry["grpc_asyncio"] = FeatureOnlineStoreAdminServiceGrpcAsyncIOTransport _transport_registry["rest"] = FeatureOnlineStoreAdminServiceRestTransport if HAS_REST_ASYNC: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncFeatureOnlineStoreAdminServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncFeatureOnlineStoreAdminServiceRestTransport + ) __all__ = ( "FeatureOnlineStoreAdminServiceTransport", diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py index 7751f96533..74e8bb4e92 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import feature_online_store from google.cloud.aiplatform_v1.types import feature_online_store_admin_service @@ -40,6 +41,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeatureOnlineStoreAdminServiceTransport(abc.ABC): """Abstract transport class for FeatureOnlineStoreAdminService.""" @@ -71,9 +75,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -86,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -102,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -429,13 +437,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py index 57a93ad60b..d10fa3b174 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -78,12 +78,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -163,9 +162,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -299,9 +299,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -372,12 +373,12 @@ def create_feature_online_store( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_feature_online_store" not in self._stubs: - self._stubs[ - "create_feature_online_store" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/CreateFeatureOnlineStore", - request_serializer=feature_online_store_admin_service.CreateFeatureOnlineStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_feature_online_store"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/CreateFeatureOnlineStore", + request_serializer=feature_online_store_admin_service.CreateFeatureOnlineStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_feature_online_store"] @@ -433,12 +434,12 @@ def list_feature_online_stores( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_feature_online_stores" not in self._stubs: - self._stubs[ - "list_feature_online_stores" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/ListFeatureOnlineStores", - request_serializer=feature_online_store_admin_service.ListFeatureOnlineStoresRequest.serialize, - response_deserializer=feature_online_store_admin_service.ListFeatureOnlineStoresResponse.deserialize, + self._stubs["list_feature_online_stores"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/ListFeatureOnlineStores", + request_serializer=feature_online_store_admin_service.ListFeatureOnlineStoresRequest.serialize, + response_deserializer=feature_online_store_admin_service.ListFeatureOnlineStoresResponse.deserialize, + ) ) return self._stubs["list_feature_online_stores"] @@ -465,12 +466,12 @@ def update_feature_online_store( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_feature_online_store" not in self._stubs: - self._stubs[ - "update_feature_online_store" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/UpdateFeatureOnlineStore", - request_serializer=feature_online_store_admin_service.UpdateFeatureOnlineStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_feature_online_store"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/UpdateFeatureOnlineStore", + request_serializer=feature_online_store_admin_service.UpdateFeatureOnlineStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_feature_online_store"] @@ -497,12 +498,12 @@ def delete_feature_online_store( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_feature_online_store" not in self._stubs: - self._stubs[ - "delete_feature_online_store" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/DeleteFeatureOnlineStore", - request_serializer=feature_online_store_admin_service.DeleteFeatureOnlineStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_feature_online_store"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/DeleteFeatureOnlineStore", + request_serializer=feature_online_store_admin_service.DeleteFeatureOnlineStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_feature_online_store"] diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py index 5ba580cd46..30b550939a 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -159,8 +159,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -211,9 +212,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -379,12 +381,12 @@ def create_feature_online_store( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_feature_online_store" not in self._stubs: - self._stubs[ - "create_feature_online_store" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/CreateFeatureOnlineStore", - request_serializer=feature_online_store_admin_service.CreateFeatureOnlineStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_feature_online_store"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/CreateFeatureOnlineStore", + request_serializer=feature_online_store_admin_service.CreateFeatureOnlineStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_feature_online_store"] @@ -440,12 +442,12 @@ def list_feature_online_stores( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_feature_online_stores" not in self._stubs: - self._stubs[ - "list_feature_online_stores" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/ListFeatureOnlineStores", - request_serializer=feature_online_store_admin_service.ListFeatureOnlineStoresRequest.serialize, - response_deserializer=feature_online_store_admin_service.ListFeatureOnlineStoresResponse.deserialize, + self._stubs["list_feature_online_stores"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/ListFeatureOnlineStores", + request_serializer=feature_online_store_admin_service.ListFeatureOnlineStoresRequest.serialize, + response_deserializer=feature_online_store_admin_service.ListFeatureOnlineStoresResponse.deserialize, + ) ) return self._stubs["list_feature_online_stores"] @@ -472,12 +474,12 @@ def update_feature_online_store( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_feature_online_store" not in self._stubs: - self._stubs[ - "update_feature_online_store" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/UpdateFeatureOnlineStore", - request_serializer=feature_online_store_admin_service.UpdateFeatureOnlineStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_feature_online_store"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/UpdateFeatureOnlineStore", + request_serializer=feature_online_store_admin_service.UpdateFeatureOnlineStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_feature_online_store"] @@ -504,12 +506,12 @@ def delete_feature_online_store( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_feature_online_store" not in self._stubs: - self._stubs[ - "delete_feature_online_store" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/DeleteFeatureOnlineStore", - request_serializer=feature_online_store_admin_service.DeleteFeatureOnlineStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_feature_online_store"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreAdminService/DeleteFeatureOnlineStore", + request_serializer=feature_online_store_admin_service.DeleteFeatureOnlineStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_feature_online_store"] diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py index a3e0413311..565c115e71 100644 --- 
a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -66,6 +67,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeatureOnlineStoreAdminServiceRestInterceptor: """Interceptor for FeatureOnlineStoreAdminService. @@ -212,12 +216,35 @@ def post_create_feature_online_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature_online_store` interceptor runs + before the `post_create_feature_online_store_with_metadata` interceptor. 
""" return response + def post_create_feature_online_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_create_feature_online_store_with_metadata` + interceptor in new development instead of the `post_create_feature_online_store` interceptor. + When both interceptors are used, this `post_create_feature_online_store_with_metadata` interceptor runs after the + `post_create_feature_online_store` interceptor. The (possibly modified) response returned by + `post_create_feature_online_store` will be passed to + `post_create_feature_online_store_with_metadata`. + """ + return response, metadata + def pre_create_feature_view( self, request: feature_online_store_admin_service.CreateFeatureViewRequest, @@ -238,12 +265,35 @@ def post_create_feature_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature_view` interceptor runs + before the `post_create_feature_view_with_metadata` interceptor. 
""" return response + def post_create_feature_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_create_feature_view_with_metadata` + interceptor in new development instead of the `post_create_feature_view` interceptor. + When both interceptors are used, this `post_create_feature_view_with_metadata` interceptor runs after the + `post_create_feature_view` interceptor. The (possibly modified) response returned by + `post_create_feature_view` will be passed to + `post_create_feature_view_with_metadata`. + """ + return response, metadata + def pre_delete_feature_online_store( self, request: feature_online_store_admin_service.DeleteFeatureOnlineStoreRequest, @@ -264,12 +314,35 @@ def post_delete_feature_online_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_online_store` interceptor runs + before the `post_delete_feature_online_store_with_metadata` interceptor. 
""" return response + def post_delete_feature_online_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_online_store_with_metadata` + interceptor in new development instead of the `post_delete_feature_online_store` interceptor. + When both interceptors are used, this `post_delete_feature_online_store_with_metadata` interceptor runs after the + `post_delete_feature_online_store` interceptor. The (possibly modified) response returned by + `post_delete_feature_online_store` will be passed to + `post_delete_feature_online_store_with_metadata`. + """ + return response, metadata + def pre_delete_feature_view( self, request: feature_online_store_admin_service.DeleteFeatureViewRequest, @@ -290,12 +363,35 @@ def post_delete_feature_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_view` interceptor runs + before the `post_delete_feature_view_with_metadata` interceptor. 
""" return response + def post_delete_feature_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_view_with_metadata` + interceptor in new development instead of the `post_delete_feature_view` interceptor. + When both interceptors are used, this `post_delete_feature_view_with_metadata` interceptor runs after the + `post_delete_feature_view` interceptor. The (possibly modified) response returned by + `post_delete_feature_view` will be passed to + `post_delete_feature_view_with_metadata`. + """ + return response, metadata + def pre_get_feature_online_store( self, request: feature_online_store_admin_service.GetFeatureOnlineStoreRequest, @@ -316,12 +412,37 @@ def post_get_feature_online_store( ) -> feature_online_store.FeatureOnlineStore: """Post-rpc interceptor for get_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_online_store` interceptor runs + before the `post_get_feature_online_store_with_metadata` interceptor. 
""" return response + def post_get_feature_online_store_with_metadata( + self, + response: feature_online_store.FeatureOnlineStore, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store.FeatureOnlineStore, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_get_feature_online_store_with_metadata` + interceptor in new development instead of the `post_get_feature_online_store` interceptor. + When both interceptors are used, this `post_get_feature_online_store_with_metadata` interceptor runs after the + `post_get_feature_online_store` interceptor. The (possibly modified) response returned by + `post_get_feature_online_store` will be passed to + `post_get_feature_online_store_with_metadata`. + """ + return response, metadata + def pre_get_feature_view( self, request: feature_online_store_admin_service.GetFeatureViewRequest, @@ -342,12 +463,35 @@ def post_get_feature_view( ) -> feature_view.FeatureView: """Post-rpc interceptor for get_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_view` interceptor runs + before the `post_get_feature_view_with_metadata` interceptor. 
""" return response + def post_get_feature_view_with_metadata( + self, + response: feature_view.FeatureView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature_view.FeatureView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_get_feature_view_with_metadata` + interceptor in new development instead of the `post_get_feature_view` interceptor. + When both interceptors are used, this `post_get_feature_view_with_metadata` interceptor runs after the + `post_get_feature_view` interceptor. The (possibly modified) response returned by + `post_get_feature_view` will be passed to + `post_get_feature_view_with_metadata`. + """ + return response, metadata + def pre_get_feature_view_sync( self, request: feature_online_store_admin_service.GetFeatureViewSyncRequest, @@ -368,12 +512,37 @@ def post_get_feature_view_sync( ) -> feature_view_sync.FeatureViewSync: """Post-rpc interceptor for get_feature_view_sync - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_view_sync_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_view_sync` interceptor runs + before the `post_get_feature_view_sync_with_metadata` interceptor. 
""" return response + def post_get_feature_view_sync_with_metadata( + self, + response: feature_view_sync.FeatureViewSync, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_view_sync.FeatureViewSync, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_feature_view_sync + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_get_feature_view_sync_with_metadata` + interceptor in new development instead of the `post_get_feature_view_sync` interceptor. + When both interceptors are used, this `post_get_feature_view_sync_with_metadata` interceptor runs after the + `post_get_feature_view_sync` interceptor. The (possibly modified) response returned by + `post_get_feature_view_sync` will be passed to + `post_get_feature_view_sync_with_metadata`. + """ + return response, metadata + def pre_list_feature_online_stores( self, request: feature_online_store_admin_service.ListFeatureOnlineStoresRequest, @@ -395,12 +564,38 @@ def post_list_feature_online_stores( ) -> feature_online_store_admin_service.ListFeatureOnlineStoresResponse: """Post-rpc interceptor for list_feature_online_stores - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_online_stores_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_online_stores` interceptor runs + before the `post_list_feature_online_stores_with_metadata` interceptor. 
""" return response + def post_list_feature_online_stores_with_metadata( + self, + response: feature_online_store_admin_service.ListFeatureOnlineStoresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.ListFeatureOnlineStoresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_online_stores + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_list_feature_online_stores_with_metadata` + interceptor in new development instead of the `post_list_feature_online_stores` interceptor. + When both interceptors are used, this `post_list_feature_online_stores_with_metadata` interceptor runs after the + `post_list_feature_online_stores` interceptor. The (possibly modified) response returned by + `post_list_feature_online_stores` will be passed to + `post_list_feature_online_stores_with_metadata`. + """ + return response, metadata + def pre_list_feature_views( self, request: feature_online_store_admin_service.ListFeatureViewsRequest, @@ -421,12 +616,38 @@ def post_list_feature_views( ) -> feature_online_store_admin_service.ListFeatureViewsResponse: """Post-rpc interceptor for list_feature_views - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_views` interceptor runs + before the `post_list_feature_views_with_metadata` interceptor. 
""" return response + def post_list_feature_views_with_metadata( + self, + response: feature_online_store_admin_service.ListFeatureViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.ListFeatureViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_list_feature_views_with_metadata` + interceptor in new development instead of the `post_list_feature_views` interceptor. + When both interceptors are used, this `post_list_feature_views_with_metadata` interceptor runs after the + `post_list_feature_views` interceptor. The (possibly modified) response returned by + `post_list_feature_views` will be passed to + `post_list_feature_views_with_metadata`. + """ + return response, metadata + def pre_list_feature_view_syncs( self, request: feature_online_store_admin_service.ListFeatureViewSyncsRequest, @@ -447,12 +668,38 @@ def post_list_feature_view_syncs( ) -> feature_online_store_admin_service.ListFeatureViewSyncsResponse: """Post-rpc interceptor for list_feature_view_syncs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_view_syncs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_view_syncs` interceptor runs + before the `post_list_feature_view_syncs_with_metadata` interceptor. 
""" return response + def post_list_feature_view_syncs_with_metadata( + self, + response: feature_online_store_admin_service.ListFeatureViewSyncsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.ListFeatureViewSyncsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_view_syncs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_list_feature_view_syncs_with_metadata` + interceptor in new development instead of the `post_list_feature_view_syncs` interceptor. + When both interceptors are used, this `post_list_feature_view_syncs_with_metadata` interceptor runs after the + `post_list_feature_view_syncs` interceptor. The (possibly modified) response returned by + `post_list_feature_view_syncs` will be passed to + `post_list_feature_view_syncs_with_metadata`. + """ + return response, metadata + def pre_sync_feature_view( self, request: feature_online_store_admin_service.SyncFeatureViewRequest, @@ -473,12 +720,38 @@ def post_sync_feature_view( ) -> feature_online_store_admin_service.SyncFeatureViewResponse: """Post-rpc interceptor for sync_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_sync_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_sync_feature_view` interceptor runs + before the `post_sync_feature_view_with_metadata` interceptor. 
""" return response + def post_sync_feature_view_with_metadata( + self, + response: feature_online_store_admin_service.SyncFeatureViewResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.SyncFeatureViewResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for sync_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_sync_feature_view_with_metadata` + interceptor in new development instead of the `post_sync_feature_view` interceptor. + When both interceptors are used, this `post_sync_feature_view_with_metadata` interceptor runs after the + `post_sync_feature_view` interceptor. The (possibly modified) response returned by + `post_sync_feature_view` will be passed to + `post_sync_feature_view_with_metadata`. + """ + return response, metadata + def pre_update_feature_online_store( self, request: feature_online_store_admin_service.UpdateFeatureOnlineStoreRequest, @@ -499,12 +772,35 @@ def post_update_feature_online_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature_online_store` interceptor runs + before the `post_update_feature_online_store_with_metadata` interceptor. 
""" return response + def post_update_feature_online_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_update_feature_online_store_with_metadata` + interceptor in new development instead of the `post_update_feature_online_store` interceptor. + When both interceptors are used, this `post_update_feature_online_store_with_metadata` interceptor runs after the + `post_update_feature_online_store` interceptor. The (possibly modified) response returned by + `post_update_feature_online_store` will be passed to + `post_update_feature_online_store_with_metadata`. + """ + return response, metadata + def pre_update_feature_view( self, request: feature_online_store_admin_service.UpdateFeatureViewRequest, @@ -525,12 +821,35 @@ def post_update_feature_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature_view` interceptor runs + before the `post_update_feature_view_with_metadata` interceptor. 
""" return response + def post_update_feature_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_update_feature_view_with_metadata` + interceptor in new development instead of the `post_update_feature_view` interceptor. + When both interceptors are used, this `post_update_feature_view_with_metadata` interceptor runs after the + `post_update_feature_view` interceptor. The (possibly modified) response returned by + `post_update_feature_view` will be passed to + `post_update_feature_view_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -823,9 +1142,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1038,6 +1358,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1174,6 +1498,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1182,6 +1510,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1222,6 +1558,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1384,6 +1724,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1412,6 +1756,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1524,6 +1872,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1532,6 +1884,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1592,6 +1952,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1754,6 +2118,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1794,6 +2162,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1906,6 +2278,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1914,6 +2290,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1974,6 +2358,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2132,6 +2520,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2172,6 +2564,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2284,6 +2680,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2304,6 +2708,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2352,6 +2760,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2518,6 +2930,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2550,6 +2966,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2658,6 +3078,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2666,6 +3090,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2726,6 +3158,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2837,7 +3273,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2877,6 +3313,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_feature_online_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2989,7 +3429,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3029,6 +3469,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3139,7 +3583,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3178,6 +3622,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_feature_online_store_with_metadata( + resp, response_metadata + ) if 
CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3285,7 +3733,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3324,6 +3772,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3476,6 +3928,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_feature_online_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3626,6 +4082,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3777,6 +4237,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_feature_view_sync(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_feature_view_sync_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3933,6 +4397,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_feature_online_stores(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_feature_online_stores_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4084,6 +4552,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_feature_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_feature_views_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4237,6 +4709,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_feature_view_syncs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_feature_view_syncs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4394,6 +4870,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_sync_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_sync_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4511,7 +4991,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: 
request_payload = None http_request = { @@ -4551,6 +5031,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_feature_online_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4663,7 +5147,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4703,6 +5187,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4911,7 +5399,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -5056,7 +5543,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -5199,7 +5685,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -5345,7 +5830,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -5497,7 +5981,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -5642,7 +6125,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -5759,7 +6241,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5876,7 +6357,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -6021,7 +6501,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -6164,7 +6643,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py index d8b92d337b..8e2ea5cdb3 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -84,6 +84,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncFeatureOnlineStoreAdminServiceRestInterceptor: """Asynchronous Interceptor for FeatureOnlineStoreAdminService. @@ -230,12 +233,35 @@ async def post_create_feature_online_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature_online_store` interceptor runs + before the `post_create_feature_online_store_with_metadata` interceptor. """ return response + async def post_create_feature_online_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_create_feature_online_store_with_metadata` + interceptor in new development instead of the `post_create_feature_online_store` interceptor. 
+ When both interceptors are used, this `post_create_feature_online_store_with_metadata` interceptor runs after the + `post_create_feature_online_store` interceptor. The (possibly modified) response returned by + `post_create_feature_online_store` will be passed to + `post_create_feature_online_store_with_metadata`. + """ + return response, metadata + async def pre_create_feature_view( self, request: feature_online_store_admin_service.CreateFeatureViewRequest, @@ -256,12 +282,35 @@ async def post_create_feature_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature_view` interceptor runs + before the `post_create_feature_view_with_metadata` interceptor. """ return response + async def post_create_feature_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_create_feature_view_with_metadata` + interceptor in new development instead of the `post_create_feature_view` interceptor. + When both interceptors are used, this `post_create_feature_view_with_metadata` interceptor runs after the + `post_create_feature_view` interceptor. The (possibly modified) response returned by + `post_create_feature_view` will be passed to + `post_create_feature_view_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_feature_online_store( self, request: feature_online_store_admin_service.DeleteFeatureOnlineStoreRequest, @@ -282,12 +331,35 @@ async def post_delete_feature_online_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_online_store` interceptor runs + before the `post_delete_feature_online_store_with_metadata` interceptor. """ return response + async def post_delete_feature_online_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_online_store_with_metadata` + interceptor in new development instead of the `post_delete_feature_online_store` interceptor. + When both interceptors are used, this `post_delete_feature_online_store_with_metadata` interceptor runs after the + `post_delete_feature_online_store` interceptor. The (possibly modified) response returned by + `post_delete_feature_online_store` will be passed to + `post_delete_feature_online_store_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_feature_view( self, request: feature_online_store_admin_service.DeleteFeatureViewRequest, @@ -308,12 +380,35 @@ async def post_delete_feature_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_view` interceptor runs + before the `post_delete_feature_view_with_metadata` interceptor. """ return response + async def post_delete_feature_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_view_with_metadata` + interceptor in new development instead of the `post_delete_feature_view` interceptor. + When both interceptors are used, this `post_delete_feature_view_with_metadata` interceptor runs after the + `post_delete_feature_view` interceptor. The (possibly modified) response returned by + `post_delete_feature_view` will be passed to + `post_delete_feature_view_with_metadata`. 
+ """ + return response, metadata + async def pre_get_feature_online_store( self, request: feature_online_store_admin_service.GetFeatureOnlineStoreRequest, @@ -334,12 +429,37 @@ async def post_get_feature_online_store( ) -> feature_online_store.FeatureOnlineStore: """Post-rpc interceptor for get_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_online_store` interceptor runs + before the `post_get_feature_online_store_with_metadata` interceptor. """ return response + async def post_get_feature_online_store_with_metadata( + self, + response: feature_online_store.FeatureOnlineStore, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store.FeatureOnlineStore, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_get_feature_online_store_with_metadata` + interceptor in new development instead of the `post_get_feature_online_store` interceptor. + When both interceptors are used, this `post_get_feature_online_store_with_metadata` interceptor runs after the + `post_get_feature_online_store` interceptor. The (possibly modified) response returned by + `post_get_feature_online_store` will be passed to + `post_get_feature_online_store_with_metadata`. 
+ """ + return response, metadata + async def pre_get_feature_view( self, request: feature_online_store_admin_service.GetFeatureViewRequest, @@ -360,12 +480,35 @@ async def post_get_feature_view( ) -> feature_view.FeatureView: """Post-rpc interceptor for get_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_view` interceptor runs + before the `post_get_feature_view_with_metadata` interceptor. """ return response + async def post_get_feature_view_with_metadata( + self, + response: feature_view.FeatureView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature_view.FeatureView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_get_feature_view_with_metadata` + interceptor in new development instead of the `post_get_feature_view` interceptor. + When both interceptors are used, this `post_get_feature_view_with_metadata` interceptor runs after the + `post_get_feature_view` interceptor. The (possibly modified) response returned by + `post_get_feature_view` will be passed to + `post_get_feature_view_with_metadata`. + """ + return response, metadata + async def pre_get_feature_view_sync( self, request: feature_online_store_admin_service.GetFeatureViewSyncRequest, @@ -386,12 +529,37 @@ async def post_get_feature_view_sync( ) -> feature_view_sync.FeatureViewSync: """Post-rpc interceptor for get_feature_view_sync - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_feature_view_sync_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_view_sync` interceptor runs + before the `post_get_feature_view_sync_with_metadata` interceptor. """ return response + async def post_get_feature_view_sync_with_metadata( + self, + response: feature_view_sync.FeatureViewSync, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_view_sync.FeatureViewSync, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_feature_view_sync + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_get_feature_view_sync_with_metadata` + interceptor in new development instead of the `post_get_feature_view_sync` interceptor. + When both interceptors are used, this `post_get_feature_view_sync_with_metadata` interceptor runs after the + `post_get_feature_view_sync` interceptor. The (possibly modified) response returned by + `post_get_feature_view_sync` will be passed to + `post_get_feature_view_sync_with_metadata`. + """ + return response, metadata + async def pre_list_feature_online_stores( self, request: feature_online_store_admin_service.ListFeatureOnlineStoresRequest, @@ -413,12 +581,38 @@ async def post_list_feature_online_stores( ) -> feature_online_store_admin_service.ListFeatureOnlineStoresResponse: """Post-rpc interceptor for list_feature_online_stores - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_online_stores_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_online_stores` interceptor runs + before the `post_list_feature_online_stores_with_metadata` interceptor. """ return response + async def post_list_feature_online_stores_with_metadata( + self, + response: feature_online_store_admin_service.ListFeatureOnlineStoresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.ListFeatureOnlineStoresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_online_stores + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_list_feature_online_stores_with_metadata` + interceptor in new development instead of the `post_list_feature_online_stores` interceptor. + When both interceptors are used, this `post_list_feature_online_stores_with_metadata` interceptor runs after the + `post_list_feature_online_stores` interceptor. The (possibly modified) response returned by + `post_list_feature_online_stores` will be passed to + `post_list_feature_online_stores_with_metadata`. + """ + return response, metadata + async def pre_list_feature_views( self, request: feature_online_store_admin_service.ListFeatureViewsRequest, @@ -439,12 +633,38 @@ async def post_list_feature_views( ) -> feature_online_store_admin_service.ListFeatureViewsResponse: """Post-rpc interceptor for list_feature_views - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_views_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_views` interceptor runs + before the `post_list_feature_views_with_metadata` interceptor. """ return response + async def post_list_feature_views_with_metadata( + self, + response: feature_online_store_admin_service.ListFeatureViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.ListFeatureViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_list_feature_views_with_metadata` + interceptor in new development instead of the `post_list_feature_views` interceptor. + When both interceptors are used, this `post_list_feature_views_with_metadata` interceptor runs after the + `post_list_feature_views` interceptor. The (possibly modified) response returned by + `post_list_feature_views` will be passed to + `post_list_feature_views_with_metadata`. + """ + return response, metadata + async def pre_list_feature_view_syncs( self, request: feature_online_store_admin_service.ListFeatureViewSyncsRequest, @@ -465,12 +685,38 @@ async def post_list_feature_view_syncs( ) -> feature_online_store_admin_service.ListFeatureViewSyncsResponse: """Post-rpc interceptor for list_feature_view_syncs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_view_syncs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. 
+ it is returned to user code. This `post_list_feature_view_syncs` interceptor runs + before the `post_list_feature_view_syncs_with_metadata` interceptor. """ return response + async def post_list_feature_view_syncs_with_metadata( + self, + response: feature_online_store_admin_service.ListFeatureViewSyncsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.ListFeatureViewSyncsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_view_syncs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_list_feature_view_syncs_with_metadata` + interceptor in new development instead of the `post_list_feature_view_syncs` interceptor. + When both interceptors are used, this `post_list_feature_view_syncs_with_metadata` interceptor runs after the + `post_list_feature_view_syncs` interceptor. The (possibly modified) response returned by + `post_list_feature_view_syncs` will be passed to + `post_list_feature_view_syncs_with_metadata`. + """ + return response, metadata + async def pre_sync_feature_view( self, request: feature_online_store_admin_service.SyncFeatureViewRequest, @@ -491,12 +737,38 @@ async def post_sync_feature_view( ) -> feature_online_store_admin_service.SyncFeatureViewResponse: """Post-rpc interceptor for sync_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_sync_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_sync_feature_view` interceptor runs + before the `post_sync_feature_view_with_metadata` interceptor. 
""" return response + async def post_sync_feature_view_with_metadata( + self, + response: feature_online_store_admin_service.SyncFeatureViewResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_admin_service.SyncFeatureViewResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for sync_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_sync_feature_view_with_metadata` + interceptor in new development instead of the `post_sync_feature_view` interceptor. + When both interceptors are used, this `post_sync_feature_view_with_metadata` interceptor runs after the + `post_sync_feature_view` interceptor. The (possibly modified) response returned by + `post_sync_feature_view` will be passed to + `post_sync_feature_view_with_metadata`. + """ + return response, metadata + async def pre_update_feature_online_store( self, request: feature_online_store_admin_service.UpdateFeatureOnlineStoreRequest, @@ -517,12 +789,35 @@ async def post_update_feature_online_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature_online_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_online_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature_online_store` interceptor runs + before the `post_update_feature_online_store_with_metadata` interceptor. 
""" return response + async def post_update_feature_online_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature_online_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_update_feature_online_store_with_metadata` + interceptor in new development instead of the `post_update_feature_online_store` interceptor. + When both interceptors are used, this `post_update_feature_online_store_with_metadata` interceptor runs after the + `post_update_feature_online_store` interceptor. The (possibly modified) response returned by + `post_update_feature_online_store` will be passed to + `post_update_feature_online_store_with_metadata`. + """ + return response, metadata + async def pre_update_feature_view( self, request: feature_online_store_admin_service.UpdateFeatureViewRequest, @@ -543,12 +838,35 @@ async def post_update_feature_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreAdminService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature_view` interceptor runs + before the `post_update_feature_view_with_metadata` interceptor. 
""" return response + async def post_update_feature_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreAdminService server but before it is returned to user code. + + We recommend only using this `post_update_feature_view_with_metadata` + interceptor in new development instead of the `post_update_feature_view` interceptor. + When both interceptors are used, this `post_update_feature_view_with_metadata` interceptor runs after the + `post_update_feature_view` interceptor. The (possibly modified) response returned by + `post_update_feature_view` will be passed to + `post_update_feature_view_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -864,9 +1182,9 @@ def __init__( ) self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1085,7 +1403,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1132,6 +1450,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + 
resp, _ = ( + await self._interceptor.post_create_feature_online_store_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1247,7 +1571,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1294,6 +1618,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1405,7 +1733,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1451,6 +1779,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_feature_online_store_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1561,7 +1895,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1607,6 +1941,10 @@ async def __call__( content = await response.read() json_format.Parse(content, 
pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1765,6 +2103,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_feature_online_store_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1923,6 +2267,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2080,6 +2428,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_feature_view_sync(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_feature_view_sync_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2242,6 +2594,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_list_feature_online_stores(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_feature_online_stores_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2401,6 +2759,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_feature_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_feature_views_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2560,6 +2922,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_feature_view_syncs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_feature_view_syncs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2725,6 +3093,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_sync_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_sync_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2843,7 +3215,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = 
{ @@ -2890,6 +3262,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_feature_online_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_feature_online_store_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3005,7 +3383,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3052,6 +3430,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_feature_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_feature_view_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3247,6 +3629,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -3383,6 +3769,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ 
-3391,6 +3781,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3431,6 +3829,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -3593,6 +3995,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -3621,6 +4027,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -3733,6 +4143,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { 
"method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3741,6 +4155,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3801,6 +4223,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3963,6 +4389,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -4003,6 +4433,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -4115,6 +4549,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", 
"uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -4123,6 +4561,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4183,6 +4629,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -4341,6 +4791,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -4381,6 +4835,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -4493,6 +4951,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4513,6 +4979,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -4561,6 +5031,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4727,6 +5201,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -4759,6 +5237,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -4867,6 +5349,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -4875,6 +5361,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4935,6 +5429,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -5112,7 +5610,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -5265,7 +5762,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -5416,7 +5912,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -5568,7 +6063,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -5726,7 +6220,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. 
Args: @@ -5879,7 +6372,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -6004,7 +6496,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -6127,7 +6618,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -6280,7 +6770,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -6433,7 +6922,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py index 2ee04bf0c1..3751ad286d 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -922,6 +922,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -1008,6 +1012,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1086,6 +1095,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1267,6 +1280,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1403,6 +1420,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1411,6 +1432,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1451,6 +1480,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1632,6 +1665,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1660,6 +1697,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1772,6 +1813,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1780,6 +1825,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1840,6 +1893,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2021,6 +2078,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2061,6 +2122,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2173,6 +2238,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2181,6 +2250,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2241,6 +2318,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2418,6 +2499,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2458,6 +2543,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2570,6 +2659,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2590,6 +2687,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2638,6 +2739,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2823,6 +2928,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2855,6 +2964,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2963,6 +3076,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2971,6 +3088,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3031,6 +3156,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/__init__.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/__init__.py index a7256dc970..25a187e722 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/feature_online_store_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py index 47ae8ca418..00efb7593d 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,9 @@ MutableMapping, MutableSequence, Optional, + AsyncIterable, + Awaitable, + AsyncIterator, Sequence, Tuple, Type, @@ -37,6 +40,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -49,7 +53,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import FeatureOnlineStoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import FeatureOnlineStoreServiceGrpcAsyncIOTransport from .client import FeatureOnlineStoreServiceClient @@ -126,7 +132,10 @@ def from_service_account_info(cls, 
info: dict, *args, **kwargs): Returns: FeatureOnlineStoreServiceAsyncClient: The constructed client. """ - return FeatureOnlineStoreServiceClient.from_service_account_info.__func__(FeatureOnlineStoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeatureOnlineStoreServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(FeatureOnlineStoreServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -142,7 +151,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeatureOnlineStoreServiceAsyncClient: The constructed client. """ - return FeatureOnlineStoreServiceClient.from_service_account_file.__func__(FeatureOnlineStoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeatureOnlineStoreServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeatureOnlineStoreServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file @@ -287,21 +301,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeatureOnlineStoreServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "universeDomain": getattr( + 
self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "credentialsType": None, + } + ), ) async def fetch_feature_values( @@ -381,7 +397,10 @@ async def sample_fetch_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_view, data_key]) + flattened_params = [feature_view, data_key] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -529,6 +548,197 @@ async def sample_search_nearest_entities(): # Done; return the response. return response + def feature_view_direct_write( + self, + requests: Optional[ + AsyncIterator[feature_online_store_service.FeatureViewDirectWriteRequest] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> Awaitable[ + AsyncIterable[feature_online_store_service.FeatureViewDirectWriteResponse] + ]: + r"""Bidirectional streaming RPC to directly write to + feature values in a feature view. Requests may not have + a one-to-one mapping to responses and responses may be + returned out-of-order to reduce latency. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_feature_view_direct_write(): + # Create a client + client = aiplatform_v1.FeatureOnlineStoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.FeatureViewDirectWriteRequest( + ) + + # This method expects an iterator which contains + # 'aiplatform_v1.FeatureViewDirectWriteRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = await client.feature_view_direct_write(requests=request_generator()) + + # Handle the response + async for response in stream: + print(response) + + Args: + requests (AsyncIterator[`google.cloud.aiplatform_v1.types.FeatureViewDirectWriteRequest`]): + The request object AsyncIterator. Request message for + [FeatureOnlineStoreService.FeatureViewDirectWrite][google.cloud.aiplatform.v1.FeatureOnlineStoreService.FeatureViewDirectWrite]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + AsyncIterable[google.cloud.aiplatform_v1.types.FeatureViewDirectWriteResponse]: + Response message for + [FeatureOnlineStoreService.FeatureViewDirectWrite][google.cloud.aiplatform.v1.FeatureOnlineStoreService.FeatureViewDirectWrite]. + + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.feature_view_direct_write + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = rpc( + requests, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def generate_fetch_access_token( + self, + request: Optional[ + Union[feature_online_store_service.GenerateFetchAccessTokenRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> feature_online_store_service.GenerateFetchAccessTokenResponse: + r"""RPC to generate an access token for the given feature + view. FeatureViews under the same FeatureOnlineStore + share the same access token. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_generate_fetch_access_token(): + # Create a client + client = aiplatform_v1.FeatureOnlineStoreServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GenerateFetchAccessTokenRequest( + ) + + # Make the request + response = await client.generate_fetch_access_token(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GenerateFetchAccessTokenRequest, dict]]): + The request object. Request message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.GenerateFetchAccessTokenResponse: + Response message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance( + request, feature_online_store_service.GenerateFetchAccessTokenRequest + ): + request = feature_online_store_service.GenerateFetchAccessTokenRequest( + request + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.generate_fetch_access_token + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("feature_view", request.feature_view),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, @@ -1237,5 +1447,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("FeatureOnlineStoreServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py index 1484975278..5aa62489c2 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -24,6 +26,8 @@ MutableMapping, MutableSequence, Optional, + Iterable, + Iterator, Sequence, Tuple, Type, @@ -43,6 +47,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -63,7 +68,9 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import FeatureOnlineStoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import FeatureOnlineStoreServiceGrpcTransport from .transports.grpc_asyncio import FeatureOnlineStoreServiceGrpcAsyncIOTransport @@ -93,9 +100,9 @@ class FeatureOnlineStoreServiceClientMeta(type): _transport_registry["grpc_asyncio"] = FeatureOnlineStoreServiceGrpcAsyncIOTransport _transport_registry["rest"] = FeatureOnlineStoreServiceRestTransport if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncFeatureOnlineStoreServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncFeatureOnlineStoreServiceRestTransport + ) def get_transport_class( cls, @@ -165,6 +172,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the 
+ google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -354,12 +389,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = FeatureOnlineStoreServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -367,7 +398,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. 
client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -399,20 +430,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = FeatureOnlineStoreServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -507,6 +532,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -599,11 +651,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = FeatureOnlineStoreServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + FeatureOnlineStoreServiceClient._read_environment_variables() + ) self._client_cert_source = ( FeatureOnlineStoreServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert @@ -718,21 +768,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeatureOnlineStoreServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "universeDomain": getattr( + 
self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "credentialsType": None, + } + ), ) def fetch_feature_values( @@ -812,7 +866,10 @@ def sample_fetch_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_view, data_key]) + flattened_params = [feature_view, data_key] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -955,6 +1012,195 @@ def sample_search_nearest_entities(): # Done; return the response. return response + def feature_view_direct_write( + self, + requests: Optional[ + Iterator[feature_online_store_service.FeatureViewDirectWriteRequest] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> Iterable[feature_online_store_service.FeatureViewDirectWriteResponse]: + r"""Bidirectional streaming RPC to directly write to + feature values in a feature view. Requests may not have + a one-to-one mapping to responses and responses may be + returned out-of-order to reduce latency. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_feature_view_direct_write(): + # Create a client + client = aiplatform_v1.FeatureOnlineStoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.FeatureViewDirectWriteRequest( + ) + + # This method expects an iterator which contains + # 'aiplatform_v1.FeatureViewDirectWriteRequest' objects + # Here we create a generator that yields a single `request` for + # demonstrative purposes. + requests = [request] + + def request_generator(): + for request in requests: + yield request + + # Make the request + stream = client.feature_view_direct_write(requests=request_generator()) + + # Handle the response + for response in stream: + print(response) + + Args: + requests (Iterator[google.cloud.aiplatform_v1.types.FeatureViewDirectWriteRequest]): + The request object iterator. Request message for + [FeatureOnlineStoreService.FeatureViewDirectWrite][google.cloud.aiplatform.v1.FeatureOnlineStoreService.FeatureViewDirectWrite]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + Iterable[google.cloud.aiplatform_v1.types.FeatureViewDirectWriteResponse]: + Response message for + [FeatureOnlineStoreService.FeatureViewDirectWrite][google.cloud.aiplatform.v1.FeatureOnlineStoreService.FeatureViewDirectWrite]. + + """ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.feature_view_direct_write + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + requests, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def generate_fetch_access_token( + self, + request: Optional[ + Union[feature_online_store_service.GenerateFetchAccessTokenRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> feature_online_store_service.GenerateFetchAccessTokenResponse: + r"""RPC to generate an access token for the given feature + view. FeatureViews under the same FeatureOnlineStore + share the same access token. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_generate_fetch_access_token(): + # Create a client + client = aiplatform_v1.FeatureOnlineStoreServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GenerateFetchAccessTokenRequest( + ) + + # Make the request + response = client.generate_fetch_access_token(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GenerateFetchAccessTokenRequest, dict]): + The request object. 
Request message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.GenerateFetchAccessTokenResponse: + Response message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, feature_online_store_service.GenerateFetchAccessTokenRequest + ): + request = feature_online_store_service.GenerateFetchAccessTokenRequest( + request + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.generate_fetch_access_token + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("feature_view", request.feature_view),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "FeatureOnlineStoreServiceClient": return self @@ -1012,16 +1258,20 @@ def list_operations( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1067,16 +1317,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1239,16 +1493,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1360,16 +1618,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1482,16 +1744,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1542,16 +1808,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1597,16 +1867,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1652,21 +1926,27 @@ def list_locations( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("FeatureOnlineStoreServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/__init__.py index e3a6f3aa4f..1d96784925 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py index 871bd47606..71052c78ed 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import feature_online_store_service from google.cloud.location import locations_pb2 # type: ignore @@ -36,6 +37,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeatureOnlineStoreServiceTransport(abc.ABC): """Abstract transport class for FeatureOnlineStoreService.""" @@ -67,9 +71,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -82,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -98,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -143,6 +151,16 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.feature_view_direct_write: gapic_v1.method.wrap_method( + self.feature_view_direct_write, + default_timeout=None, + client_info=client_info, + ), + self.generate_fetch_access_token: gapic_v1.method.wrap_method( + self.generate_fetch_access_token, + default_timeout=None, + client_info=client_info, + ), self.get_location: gapic_v1.method.wrap_method( self.get_location, default_timeout=None, @@ -228,6 +246,30 @@ def search_nearest_entities( ]: raise NotImplementedError() + @property + def feature_view_direct_write( + self, + ) -> Callable[ + [feature_online_store_service.FeatureViewDirectWriteRequest], + Union[ + feature_online_store_service.FeatureViewDirectWriteResponse, + Awaitable[feature_online_store_service.FeatureViewDirectWriteResponse], + ], + ]: + raise NotImplementedError() + + @property + def generate_fetch_access_token( + self, + ) -> Callable[ + 
[feature_online_store_service.GenerateFetchAccessTokenRequest], + Union[ + feature_online_store_service.GenerateFetchAccessTokenResponse, + Awaitable[feature_online_store_service.GenerateFetchAccessTokenResponse], + ], + ]: + raise NotImplementedError() + @property def list_operations( self, @@ -252,13 +294,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py index 56cb59a642..f290355a59 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -74,12 +74,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -156,9 +155,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -291,9 +291,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -386,6 +387,73 @@ def search_nearest_entities( ) return self._stubs["search_nearest_entities"] + @property + def feature_view_direct_write( + self, + ) -> Callable[ + [feature_online_store_service.FeatureViewDirectWriteRequest], + feature_online_store_service.FeatureViewDirectWriteResponse, + ]: + r"""Return a callable for the feature view direct write method over gRPC. + + Bidirectional streaming RPC to directly write to + feature values in a feature view. Requests may not have + a one-to-one mapping to responses and responses may be + returned out-of-order to reduce latency. + + Returns: + Callable[[~.FeatureViewDirectWriteRequest], + ~.FeatureViewDirectWriteResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "feature_view_direct_write" not in self._stubs: + self._stubs["feature_view_direct_write"] = ( + self._logged_channel.stream_stream( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreService/FeatureViewDirectWrite", + request_serializer=feature_online_store_service.FeatureViewDirectWriteRequest.serialize, + response_deserializer=feature_online_store_service.FeatureViewDirectWriteResponse.deserialize, + ) + ) + return self._stubs["feature_view_direct_write"] + + @property + def generate_fetch_access_token( + self, + ) -> Callable[ + [feature_online_store_service.GenerateFetchAccessTokenRequest], + feature_online_store_service.GenerateFetchAccessTokenResponse, + ]: + r"""Return a callable for the generate fetch access token method over gRPC. + + RPC to generate an access token for the given feature + view. FeatureViews under the same FeatureOnlineStore + share the same access token. 
+ + Returns: + Callable[[~.GenerateFetchAccessTokenRequest], + ~.GenerateFetchAccessTokenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_fetch_access_token" not in self._stubs: + self._stubs["generate_fetch_access_token"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreService/GenerateFetchAccessToken", + request_serializer=feature_online_store_service.GenerateFetchAccessTokenRequest.serialize, + response_deserializer=feature_online_store_service.GenerateFetchAccessTokenResponse.deserialize, + ) + ) + return self._stubs["generate_fetch_access_token"] + def close(self): self._logged_channel.close() diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py index 5cf0b283b5..99a59f02c7 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -152,8 +152,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. 
A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -204,9 +205,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -393,6 +395,73 @@ def search_nearest_entities( ) return self._stubs["search_nearest_entities"] + @property + def feature_view_direct_write( + self, + ) -> Callable[ + [feature_online_store_service.FeatureViewDirectWriteRequest], + Awaitable[feature_online_store_service.FeatureViewDirectWriteResponse], + ]: + r"""Return a callable for the feature view direct write method over gRPC. + + Bidirectional streaming RPC to directly write to + feature values in a feature view. Requests may not have + a one-to-one mapping to responses and responses may be + returned out-of-order to reduce latency. + + Returns: + Callable[[~.FeatureViewDirectWriteRequest], + Awaitable[~.FeatureViewDirectWriteResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "feature_view_direct_write" not in self._stubs: + self._stubs["feature_view_direct_write"] = ( + self._logged_channel.stream_stream( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreService/FeatureViewDirectWrite", + request_serializer=feature_online_store_service.FeatureViewDirectWriteRequest.serialize, + response_deserializer=feature_online_store_service.FeatureViewDirectWriteResponse.deserialize, + ) + ) + return self._stubs["feature_view_direct_write"] + + @property + def generate_fetch_access_token( + self, + ) -> Callable[ + [feature_online_store_service.GenerateFetchAccessTokenRequest], + Awaitable[feature_online_store_service.GenerateFetchAccessTokenResponse], + ]: + r"""Return a callable for the generate fetch access token method over gRPC. + + RPC to generate an access token for the given feature + view. FeatureViews under the same FeatureOnlineStore + share the same access token. + + Returns: + Callable[[~.GenerateFetchAccessTokenRequest], + Awaitable[~.GenerateFetchAccessTokenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "generate_fetch_access_token" not in self._stubs: + self._stubs["generate_fetch_access_token"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.FeatureOnlineStoreService/GenerateFetchAccessToken", + request_serializer=feature_online_store_service.GenerateFetchAccessTokenRequest.serialize, + response_deserializer=feature_online_store_service.GenerateFetchAccessTokenResponse.deserialize, + ) + ) + return self._stubs["generate_fetch_access_token"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -406,6 +475,16 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.feature_view_direct_write: self._wrap_method( + self.feature_view_direct_write, + default_timeout=None, + client_info=client_info, + ), + self.generate_fetch_access_token: self._wrap_method( + self.generate_fetch_access_token, + default_timeout=None, + client_info=client_info, + ), self.get_location: self._wrap_method( self.get_location, default_timeout=None, diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py index 5fea64417c..2bf5722564 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -62,6 +63,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeatureOnlineStoreServiceRestInterceptor: """Interceptor for FeatureOnlineStoreService. @@ -86,6 +90,14 @@ def post_fetch_feature_values(self, response): logging.log(f"Received response: {response}") return response + def pre_generate_fetch_access_token(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_fetch_access_token(self, response): + logging.log(f"Received response: {response}") + return response + def pre_search_nearest_entities(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -120,12 +132,90 @@ def post_fetch_feature_values( ) -> feature_online_store_service.FetchFeatureValuesResponse: """Post-rpc interceptor for fetch_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_fetch_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreService server but before - it is returned to user code. + it is returned to user code. This `post_fetch_feature_values` interceptor runs + before the `post_fetch_feature_values_with_metadata` interceptor. 
+ """ + return response + + def post_fetch_feature_values_with_metadata( + self, + response: feature_online_store_service.FetchFeatureValuesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.FetchFeatureValuesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for fetch_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreService server but before it is returned to user code. + + We recommend only using this `post_fetch_feature_values_with_metadata` + interceptor in new development instead of the `post_fetch_feature_values` interceptor. + When both interceptors are used, this `post_fetch_feature_values_with_metadata` interceptor runs after the + `post_fetch_feature_values` interceptor. The (possibly modified) response returned by + `post_fetch_feature_values` will be passed to + `post_fetch_feature_values_with_metadata`. + """ + return response, metadata + + def pre_generate_fetch_access_token( + self, + request: feature_online_store_service.GenerateFetchAccessTokenRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.GenerateFetchAccessTokenRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for generate_fetch_access_token + + Override in a subclass to manipulate the request or metadata + before they are sent to the FeatureOnlineStoreService server. + """ + return request, metadata + + def post_generate_fetch_access_token( + self, response: feature_online_store_service.GenerateFetchAccessTokenResponse + ) -> feature_online_store_service.GenerateFetchAccessTokenResponse: + """Post-rpc interceptor for generate_fetch_access_token + + DEPRECATED. Please use the `post_generate_fetch_access_token_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response + after it is returned by the FeatureOnlineStoreService server but before + it is returned to user code. This `post_generate_fetch_access_token` interceptor runs + before the `post_generate_fetch_access_token_with_metadata` interceptor. """ return response + def post_generate_fetch_access_token_with_metadata( + self, + response: feature_online_store_service.GenerateFetchAccessTokenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.GenerateFetchAccessTokenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for generate_fetch_access_token + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreService server but before it is returned to user code. + + We recommend only using this `post_generate_fetch_access_token_with_metadata` + interceptor in new development instead of the `post_generate_fetch_access_token` interceptor. + When both interceptors are used, this `post_generate_fetch_access_token_with_metadata` interceptor runs after the + `post_generate_fetch_access_token` interceptor. The (possibly modified) response returned by + `post_generate_fetch_access_token` will be passed to + `post_generate_fetch_access_token_with_metadata`. + """ + return response, metadata + def pre_search_nearest_entities( self, request: feature_online_store_service.SearchNearestEntitiesRequest, @@ -146,12 +236,38 @@ def post_search_nearest_entities( ) -> feature_online_store_service.SearchNearestEntitiesResponse: """Post-rpc interceptor for search_nearest_entities - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_nearest_entities_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreService server but before - it is returned to user code. 
+ it is returned to user code. This `post_search_nearest_entities` interceptor runs + before the `post_search_nearest_entities_with_metadata` interceptor. """ return response + def post_search_nearest_entities_with_metadata( + self, + response: feature_online_store_service.SearchNearestEntitiesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.SearchNearestEntitiesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_nearest_entities + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreService server but before it is returned to user code. + + We recommend only using this `post_search_nearest_entities_with_metadata` + interceptor in new development instead of the `post_search_nearest_entities` interceptor. + When both interceptors are used, this `post_search_nearest_entities_with_metadata` interceptor runs after the + `post_search_nearest_entities` interceptor. The (possibly modified) response returned by + `post_search_nearest_entities` will be passed to + `post_search_nearest_entities_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -443,9 +559,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -484,6 +601,25 @@ def __init__( self._interceptor = interceptor or FeatureOnlineStoreServiceRestInterceptor() self._prep_wrapped_messages(client_info) + class _FeatureViewDirectWrite( + _BaseFeatureOnlineStoreServiceRestTransport._BaseFeatureViewDirectWrite, + FeatureOnlineStoreServiceRestStub, + ): + def __hash__(self): + return hash("FeatureOnlineStoreServiceRestTransport.FeatureViewDirectWrite") + + def __call__( + self, + request: feature_online_store_service.FeatureViewDirectWriteRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> rest_streaming.ResponseIterator: + raise NotImplementedError( + "Method FeatureViewDirectWrite is not available over REST transport" + ) + class _FetchFeatureValues( _BaseFeatureOnlineStoreServiceRestTransport._BaseFetchFeatureValues, FeatureOnlineStoreServiceRestStub, @@ -616,6 +752,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_fetch_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_fetch_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -643,6 +783,170 @@ def __call__( ) return resp + class _GenerateFetchAccessToken( + _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken, + FeatureOnlineStoreServiceRestStub, + ): + def __hash__(self): + return hash( + "FeatureOnlineStoreServiceRestTransport.GenerateFetchAccessToken" + ) + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + 
headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: feature_online_store_service.GenerateFetchAccessTokenRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> feature_online_store_service.GenerateFetchAccessTokenResponse: + r"""Call the generate fetch access + token method over HTTP. + + Args: + request (~.feature_online_store_service.GenerateFetchAccessTokenRequest): + The request object. Request message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.feature_online_store_service.GenerateFetchAccessTokenResponse: + Response message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. 
+ + """ + + http_options = ( + _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_http_options() + ) + + request, metadata = self._interceptor.pre_generate_fetch_access_token( + request, metadata + ) + transcoded_request = _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_transcoded_request( + http_options, request + ) + + body = _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.FeatureOnlineStoreServiceClient.GenerateFetchAccessToken", + extra={ + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "rpcName": "GenerateFetchAccessToken", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = FeatureOnlineStoreServiceRestTransport._GenerateFetchAccessToken._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = feature_online_store_service.GenerateFetchAccessTokenResponse() + pb_resp = feature_online_store_service.GenerateFetchAccessTokenResponse.pb( + resp + ) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_generate_fetch_access_token(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_generate_fetch_access_token_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = feature_online_store_service.GenerateFetchAccessTokenResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.FeatureOnlineStoreServiceClient.generate_fetch_access_token", + extra={ + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "rpcName": "GenerateFetchAccessToken", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _SearchNearestEntities( _BaseFeatureOnlineStoreServiceRestTransport._BaseSearchNearestEntities, FeatureOnlineStoreServiceRestStub, @@ -775,6 +1079,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_search_nearest_entities(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_search_nearest_entities_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -800,6 +1108,17 @@ def __call__( ) return resp + @property + def feature_view_direct_write( + 
self, + ) -> Callable[ + [feature_online_store_service.FeatureViewDirectWriteRequest], + feature_online_store_service.FeatureViewDirectWriteResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._FeatureViewDirectWrite(self._session, self._host, self._interceptor) # type: ignore + @property def fetch_feature_values( self, @@ -811,6 +1130,17 @@ def fetch_feature_values( # In C++ this would require a dynamic_cast return self._FetchFeatureValues(self._session, self._host, self._interceptor) # type: ignore + @property + def generate_fetch_access_token( + self, + ) -> Callable[ + [feature_online_store_service.GenerateFetchAccessTokenRequest], + feature_online_store_service.GenerateFetchAccessTokenResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateFetchAccessToken(self._session, self._host, self._interceptor) # type: ignore + @property def search_nearest_entities( self, @@ -864,7 +1194,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -1009,7 +1338,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1154,7 +1482,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1300,7 +1627,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -1450,7 +1776,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1595,7 +1920,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1714,7 +2038,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1833,7 +2156,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1978,7 +2300,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2123,7 +2444,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py index e13eceebc4..f131ec927a 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -36,7 +36,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -79,6 +79,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncFeatureOnlineStoreServiceRestInterceptor: """Asynchronous Interceptor for FeatureOnlineStoreService. @@ -103,6 +106,14 @@ async def post_fetch_feature_values(self, response): logging.log(f"Received response: {response}") return response + async def pre_generate_fetch_access_token(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_generate_fetch_access_token(self, response): + logging.log(f"Received response: {response}") + return response + async def pre_search_nearest_entities(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -137,12 +148,90 @@ async def post_fetch_feature_values( ) -> feature_online_store_service.FetchFeatureValuesResponse: """Post-rpc interceptor for fetch_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_fetch_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreService server but before - it is returned to user code. + it is returned to user code. This `post_fetch_feature_values` interceptor runs + before the `post_fetch_feature_values_with_metadata` interceptor. 
""" return response + async def post_fetch_feature_values_with_metadata( + self, + response: feature_online_store_service.FetchFeatureValuesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.FetchFeatureValuesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for fetch_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreService server but before it is returned to user code. + + We recommend only using this `post_fetch_feature_values_with_metadata` + interceptor in new development instead of the `post_fetch_feature_values` interceptor. + When both interceptors are used, this `post_fetch_feature_values_with_metadata` interceptor runs after the + `post_fetch_feature_values` interceptor. The (possibly modified) response returned by + `post_fetch_feature_values` will be passed to + `post_fetch_feature_values_with_metadata`. + """ + return response, metadata + + async def pre_generate_fetch_access_token( + self, + request: feature_online_store_service.GenerateFetchAccessTokenRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.GenerateFetchAccessTokenRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for generate_fetch_access_token + + Override in a subclass to manipulate the request or metadata + before they are sent to the FeatureOnlineStoreService server. + """ + return request, metadata + + async def post_generate_fetch_access_token( + self, response: feature_online_store_service.GenerateFetchAccessTokenResponse + ) -> feature_online_store_service.GenerateFetchAccessTokenResponse: + """Post-rpc interceptor for generate_fetch_access_token + + DEPRECATED. Please use the `post_generate_fetch_access_token_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response + after it is returned by the FeatureOnlineStoreService server but before + it is returned to user code. This `post_generate_fetch_access_token` interceptor runs + before the `post_generate_fetch_access_token_with_metadata` interceptor. + """ + return response + + async def post_generate_fetch_access_token_with_metadata( + self, + response: feature_online_store_service.GenerateFetchAccessTokenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.GenerateFetchAccessTokenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for generate_fetch_access_token + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreService server but before it is returned to user code. + + We recommend only using this `post_generate_fetch_access_token_with_metadata` + interceptor in new development instead of the `post_generate_fetch_access_token` interceptor. + When both interceptors are used, this `post_generate_fetch_access_token_with_metadata` interceptor runs after the + `post_generate_fetch_access_token` interceptor. The (possibly modified) response returned by + `post_generate_fetch_access_token` will be passed to + `post_generate_fetch_access_token_with_metadata`. + """ + return response, metadata + async def pre_search_nearest_entities( self, request: feature_online_store_service.SearchNearestEntitiesRequest, @@ -163,12 +252,38 @@ async def post_search_nearest_entities( ) -> feature_online_store_service.SearchNearestEntitiesResponse: """Post-rpc interceptor for search_nearest_entities - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_nearest_entities_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the FeatureOnlineStoreService server but before - it is returned to user code. + it is returned to user code. This `post_search_nearest_entities` interceptor runs + before the `post_search_nearest_entities_with_metadata` interceptor. """ return response + async def post_search_nearest_entities_with_metadata( + self, + response: feature_online_store_service.SearchNearestEntitiesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_online_store_service.SearchNearestEntitiesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_nearest_entities + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureOnlineStoreService server but before it is returned to user code. + + We recommend only using this `post_search_nearest_entities_with_metadata` + interceptor in new development instead of the `post_search_nearest_entities` interceptor. + When both interceptors are used, this `post_search_nearest_entities_with_metadata` interceptor runs after the + `post_search_nearest_entities` interceptor. The (possibly modified) response returned by + `post_search_nearest_entities` will be passed to + `post_search_nearest_entities_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -495,6 +610,16 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.feature_view_direct_write: self._wrap_method( + self.feature_view_direct_write, + default_timeout=None, + client_info=client_info, + ), + self.generate_fetch_access_token: self._wrap_method( + self.generate_fetch_access_token, + default_timeout=None, + client_info=client_info, + ), self.get_location: self._wrap_method( self.get_location, default_timeout=None, @@ -552,6 +677,27 @@ def _wrap_method(self, func, *args, **kwargs): kwargs["kind"] = self.kind return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + class _FeatureViewDirectWrite( + _BaseFeatureOnlineStoreServiceRestTransport._BaseFeatureViewDirectWrite, + AsyncFeatureOnlineStoreServiceRestStub, + ): + def __hash__(self): + return hash( + "AsyncFeatureOnlineStoreServiceRestTransport.FeatureViewDirectWrite" + ) + + async def __call__( + self, + request: feature_online_store_service.FeatureViewDirectWriteRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> rest_streaming_async.AsyncResponseIterator: + raise NotImplementedError( + "Method FeatureViewDirectWrite is not available over REST transport" + ) + class _FetchFeatureValues( _BaseFeatureOnlineStoreServiceRestTransport._BaseFetchFeatureValues, AsyncFeatureOnlineStoreServiceRestStub, @@ -691,6 +837,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_fetch_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_fetch_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -719,6 +869,178 @@ async def __call__( return resp + class _GenerateFetchAccessToken( + _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken, + AsyncFeatureOnlineStoreServiceRestStub, + ): + def __hash__(self): + return hash( + "AsyncFeatureOnlineStoreServiceRestTransport.GenerateFetchAccessToken" + ) + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: feature_online_store_service.GenerateFetchAccessTokenRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> feature_online_store_service.GenerateFetchAccessTokenResponse: + r"""Call the generate fetch access + token method over HTTP. + + Args: + request (~.feature_online_store_service.GenerateFetchAccessTokenRequest): + The request object. Request message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + ~.feature_online_store_service.GenerateFetchAccessTokenResponse: + Response message for + [FeatureOnlineStoreService.GenerateFetchAccessToken][google.cloud.aiplatform.v1.FeatureOnlineStoreService.GenerateFetchAccessToken]. + + """ + + http_options = ( + _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_http_options() + ) + + request, metadata = await self._interceptor.pre_generate_fetch_access_token( + request, metadata + ) + transcoded_request = _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_transcoded_request( + http_options, request + ) + + body = _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseFeatureOnlineStoreServiceRestTransport._BaseGenerateFetchAccessToken._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.FeatureOnlineStoreServiceClient.GenerateFetchAccessToken", + extra={ + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "rpcName": "GenerateFetchAccessToken", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncFeatureOnlineStoreServiceRestTransport._GenerateFetchAccessToken._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate 
core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = feature_online_store_service.GenerateFetchAccessTokenResponse() + pb_resp = feature_online_store_service.GenerateFetchAccessTokenResponse.pb( + resp + ) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_generate_fetch_access_token(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_generate_fetch_access_token_with_metadata( + resp, response_metadata + ) + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = feature_online_store_service.GenerateFetchAccessTokenResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.FeatureOnlineStoreServiceAsyncClient.generate_fetch_access_token", + extra={ + "serviceName": "google.cloud.aiplatform.v1.FeatureOnlineStoreService", + "rpcName": "GenerateFetchAccessToken", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + class _SearchNearestEntities( _BaseFeatureOnlineStoreServiceRestTransport._BaseSearchNearestEntities, AsyncFeatureOnlineStoreServiceRestStub, @@ -858,6 +1180,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp 
= await self._interceptor.post_search_nearest_entities(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_search_nearest_entities_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -884,6 +1212,15 @@ async def __call__( return resp + @property + def feature_view_direct_write( + self, + ) -> Callable[ + [feature_online_store_service.FeatureViewDirectWriteRequest], + feature_online_store_service.FeatureViewDirectWriteResponse, + ]: + return self._FeatureViewDirectWrite(self._session, self._host, self._interceptor) # type: ignore + @property def fetch_feature_values( self, @@ -893,6 +1230,15 @@ def fetch_feature_values( ]: return self._FetchFeatureValues(self._session, self._host, self._interceptor) # type: ignore + @property + def generate_fetch_access_token( + self, + ) -> Callable[ + [feature_online_store_service.GenerateFetchAccessTokenRequest], + feature_online_store_service.GenerateFetchAccessTokenResponse, + ]: + return self._GenerateFetchAccessToken(self._session, self._host, self._interceptor) # type: ignore + @property def search_nearest_entities( self, @@ -944,7 +1290,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -1095,7 +1440,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1246,7 +1590,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. 
Args: @@ -1398,7 +1741,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1556,7 +1898,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1707,7 +2048,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1830,7 +2170,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1953,7 +2292,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -2104,7 +2442,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2255,7 +2592,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py index c478be3b13..3b9f2f22f8 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -93,6 +93,10 @@ def __init__( api_audience=api_audience, ) + class _BaseFeatureViewDirectWrite: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + class _BaseFetchFeatureValues: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -152,6 +156,50 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseGenerateFetchAccessToken: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{feature_view=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:generateFetchAccessToken", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = ( + feature_online_store_service.GenerateFetchAccessTokenRequest.pb(request) + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], 
use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseSearchNearestEntities: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -332,6 +380,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -418,6 +470,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -496,6 +553,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -677,6 +738,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -813,6 +878,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": 
"post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -821,6 +890,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -861,6 +938,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1042,6 +1123,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1070,6 +1155,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1182,6 +1271,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1190,6 +1283,14 @@ def 
_get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1250,6 +1351,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1431,6 +1536,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1471,6 +1580,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1583,6 +1696,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1591,6 +1708,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1651,6 +1776,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1828,6 +1957,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1868,6 +2001,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1980,6 +2117,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2000,6 +2145,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2048,6 +2197,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2233,6 +2386,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2265,6 +2422,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2373,6 +2534,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2381,6 +2546,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2441,6 +2614,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/__init__.py b/google/cloud/aiplatform_v1/services/feature_registry_service/__init__.py index 41c90cfd29..6b1b86a740 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py index 4e75484161..65ef956dee 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_registry_service import pagers from google.cloud.aiplatform_v1.types import feature from google.cloud.aiplatform_v1.types import feature as gca_feature @@ -58,9 +57,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeatureRegistryServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import FeatureRegistryServiceGrpcAsyncIOTransport from .client import FeatureRegistryServiceClient @@ -135,7 +136,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeatureRegistryServiceAsyncClient: The constructed client. 
""" - return FeatureRegistryServiceClient.from_service_account_info.__func__(FeatureRegistryServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeatureRegistryServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(FeatureRegistryServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -151,7 +155,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeatureRegistryServiceAsyncClient: The constructed client. """ - return FeatureRegistryServiceClient.from_service_account_file.__func__(FeatureRegistryServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeatureRegistryServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeatureRegistryServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file @@ -296,21 +305,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeatureRegistryServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": 
f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", + "credentialsType": None, + } + ), ) async def create_feature_group( @@ -416,7 +427,10 @@ async def sample_create_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature_group, feature_group_id]) + flattened_params = [parent, feature_group, feature_group_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -536,7 +550,10 @@ async def sample_get_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -651,7 +668,10 @@ async def sample_list_feature_groups(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -776,10 +796,10 @@ async def sample_update_feature_group(): Updatable fields: - - ``labels`` - - ``description`` - - ``big_query`` - - ``big_query.entity_id_columns`` + - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -804,7 +824,10 @@ async def sample_update_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_group, update_mask]) + flattened_params = [feature_group, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -951,7 +974,10 @@ async def sample_delete_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1107,7 +1133,10 @@ async def sample_create_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature, feature_id]) + flattened_params = [parent, feature, feature_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1259,7 +1288,10 @@ async def sample_batch_create_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) + flattened_params = [parent, requests] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1383,7 +1415,10 @@ async def sample_get_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1502,7 +1537,10 @@ async def sample_list_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1626,12 +1664,12 @@ async def sample_update_feature(): Updatable fields: - - ``description`` - - ``labels`` - - ``disable_monitoring`` (Not supported for - FeatureRegistryService Feature) - - ``point_of_contact`` (Not supported for - FeaturestoreService FeatureStore) + - ``description`` + - ``labels`` + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1656,7 +1694,10 @@ async def sample_update_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) + flattened_params = [feature, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1796,7 +1837,10 @@ async def sample_delete_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2555,5 +2599,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("FeatureRegistryServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py index 7ded4d3d77..113ca24a8f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/client.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.feature_registry_service import pagers from google.cloud.aiplatform_v1.types import feature from google.cloud.aiplatform_v1.types import feature as gca_feature @@ -72,9 +73,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeatureRegistryServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import FeatureRegistryServiceGrpcTransport from .transports.grpc_asyncio import FeatureRegistryServiceGrpcAsyncIOTransport @@ -176,6 +179,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + 
"""Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -389,12 +420,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = FeatureRegistryServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -402,7 +429,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert 
source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -434,20 +461,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = FeatureRegistryServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -542,6 +563,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -634,11 +682,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = FeatureRegistryServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + FeatureRegistryServiceClient._read_environment_variables() + ) self._client_cert_source = FeatureRegistryServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -751,21 +797,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeatureRegistryServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", + "universeDomain": getattr( + 
self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", + "credentialsType": None, + } + ), ) def create_feature_group( @@ -871,7 +921,10 @@ def sample_create_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature_group, feature_group_id]) + flattened_params = [parent, feature_group, feature_group_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -988,7 +1041,10 @@ def sample_get_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1100,7 +1156,10 @@ def sample_list_feature_groups(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1222,10 +1281,10 @@ def sample_update_feature_group(): Updatable fields: - - ``labels`` - - ``description`` - - ``big_query`` - - ``big_query.entity_id_columns`` + - ``labels`` + - ``description`` + - ``big_query`` + - ``big_query.entity_id_columns`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1250,7 +1309,10 @@ def sample_update_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature_group, update_mask]) + flattened_params = [feature_group, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1394,7 +1456,10 @@ def sample_delete_feature_group(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1547,7 +1612,10 @@ def sample_create_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature, feature_id]) + flattened_params = [parent, feature, feature_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1696,7 +1764,10 @@ def sample_batch_create_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) + flattened_params = [parent, requests] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1817,7 +1888,10 @@ def sample_get_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1933,7 +2007,10 @@ def sample_list_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2054,12 +2131,12 @@ def sample_update_feature(): Updatable fields: - - ``description`` - - ``labels`` - - ``disable_monitoring`` (Not supported for - FeatureRegistryService Feature) - - ``point_of_contact`` (Not supported for - FeaturestoreService FeatureStore) + - ``description`` + - ``labels`` + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2084,7 +2161,10 @@ def sample_update_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) + flattened_params = [feature, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2221,7 +2301,10 @@ def sample_delete_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2326,16 +2409,20 @@ def list_operations( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -2381,16 +2468,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -2553,16 +2644,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -2674,16 +2769,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2796,16 +2895,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2856,16 +2959,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2911,16 +3018,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2966,21 +3077,27 @@ def list_locations( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("FeatureRegistryServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/pagers.py b/google/cloud/aiplatform_v1/services/feature_registry_service/pagers.py index 6e041eda7f..3df24f7f15 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/__init__.py index c591e51274..f5ea1cd995 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py index cfaf443930..5a3217c8c8 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import feature from google.cloud.aiplatform_v1.types import feature_group @@ -40,6 +41,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeatureRegistryServiceTransport(abc.ABC): """Abstract transport class for FeatureRegistryService.""" @@ -71,9 +75,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
@@ -86,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -102,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -387,13 +395,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py index a65b81ae85..808144b92f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -78,12 +78,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.FeatureRegistryService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -161,9 +160,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -297,9 +297,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py index 193e6751af..78f193547b 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -157,8 +157,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -209,9 +210,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. 
+ This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py index f4676c6e43..f02a9bf94f 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -66,6 +67,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeatureRegistryServiceRestInterceptor: """Interceptor for FeatureRegistryService. @@ -196,12 +200,35 @@ def post_batch_create_features( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_create_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_create_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. 
This `post_batch_create_features` interceptor runs + before the `post_batch_create_features_with_metadata` interceptor. """ return response + def post_batch_create_features_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_create_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_batch_create_features_with_metadata` + interceptor in new development instead of the `post_batch_create_features` interceptor. + When both interceptors are used, this `post_batch_create_features_with_metadata` interceptor runs after the + `post_batch_create_features` interceptor. The (possibly modified) response returned by + `post_batch_create_features` will be passed to + `post_batch_create_features_with_metadata`. + """ + return response, metadata + def pre_create_feature( self, request: featurestore_service.CreateFeatureRequest, @@ -222,12 +249,35 @@ def post_create_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature` interceptor runs + before the `post_create_feature_with_metadata` interceptor. 
""" return response + def post_create_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_create_feature_with_metadata` + interceptor in new development instead of the `post_create_feature` interceptor. + When both interceptors are used, this `post_create_feature_with_metadata` interceptor runs after the + `post_create_feature` interceptor. The (possibly modified) response returned by + `post_create_feature` will be passed to + `post_create_feature_with_metadata`. + """ + return response, metadata + def pre_create_feature_group( self, request: feature_registry_service.CreateFeatureGroupRequest, @@ -248,12 +298,35 @@ def post_create_feature_group( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature_group` interceptor runs + before the `post_create_feature_group_with_metadata` interceptor. 
""" return response + def post_create_feature_group_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_create_feature_group_with_metadata` + interceptor in new development instead of the `post_create_feature_group` interceptor. + When both interceptors are used, this `post_create_feature_group_with_metadata` interceptor runs after the + `post_create_feature_group` interceptor. The (possibly modified) response returned by + `post_create_feature_group` will be passed to + `post_create_feature_group_with_metadata`. + """ + return response, metadata + def pre_delete_feature( self, request: featurestore_service.DeleteFeatureRequest, @@ -274,12 +347,35 @@ def post_delete_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature` interceptor runs + before the `post_delete_feature_with_metadata` interceptor. 
""" return response + def post_delete_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_with_metadata` + interceptor in new development instead of the `post_delete_feature` interceptor. + When both interceptors are used, this `post_delete_feature_with_metadata` interceptor runs after the + `post_delete_feature` interceptor. The (possibly modified) response returned by + `post_delete_feature` will be passed to + `post_delete_feature_with_metadata`. + """ + return response, metadata + def pre_delete_feature_group( self, request: feature_registry_service.DeleteFeatureGroupRequest, @@ -300,12 +396,35 @@ def post_delete_feature_group( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_group` interceptor runs + before the `post_delete_feature_group_with_metadata` interceptor. 
""" return response + def post_delete_feature_group_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_group_with_metadata` + interceptor in new development instead of the `post_delete_feature_group` interceptor. + When both interceptors are used, this `post_delete_feature_group_with_metadata` interceptor runs after the + `post_delete_feature_group` interceptor. The (possibly modified) response returned by + `post_delete_feature_group` will be passed to + `post_delete_feature_group_with_metadata`. + """ + return response, metadata + def pre_get_feature( self, request: featurestore_service.GetFeatureRequest, @@ -323,12 +442,35 @@ def pre_get_feature( def post_get_feature(self, response: feature.Feature) -> feature.Feature: """Post-rpc interceptor for get_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature` interceptor runs + before the `post_get_feature_with_metadata` interceptor. 
""" return response + def post_get_feature_with_metadata( + self, + response: feature.Feature, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature.Feature, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_get_feature_with_metadata` + interceptor in new development instead of the `post_get_feature` interceptor. + When both interceptors are used, this `post_get_feature_with_metadata` interceptor runs after the + `post_get_feature` interceptor. The (possibly modified) response returned by + `post_get_feature` will be passed to + `post_get_feature_with_metadata`. + """ + return response, metadata + def pre_get_feature_group( self, request: feature_registry_service.GetFeatureGroupRequest, @@ -349,12 +491,35 @@ def post_get_feature_group( ) -> feature_group.FeatureGroup: """Post-rpc interceptor for get_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_group` interceptor runs + before the `post_get_feature_group_with_metadata` interceptor. 
""" return response + def post_get_feature_group_with_metadata( + self, + response: feature_group.FeatureGroup, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature_group.FeatureGroup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_get_feature_group_with_metadata` + interceptor in new development instead of the `post_get_feature_group` interceptor. + When both interceptors are used, this `post_get_feature_group_with_metadata` interceptor runs after the + `post_get_feature_group` interceptor. The (possibly modified) response returned by + `post_get_feature_group` will be passed to + `post_get_feature_group_with_metadata`. + """ + return response, metadata + def pre_list_feature_groups( self, request: feature_registry_service.ListFeatureGroupsRequest, @@ -375,12 +540,38 @@ def post_list_feature_groups( ) -> feature_registry_service.ListFeatureGroupsResponse: """Post-rpc interceptor for list_feature_groups - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_groups_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_groups` interceptor runs + before the `post_list_feature_groups_with_metadata` interceptor. 
""" return response + def post_list_feature_groups_with_metadata( + self, + response: feature_registry_service.ListFeatureGroupsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_registry_service.ListFeatureGroupsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_groups + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_list_feature_groups_with_metadata` + interceptor in new development instead of the `post_list_feature_groups` interceptor. + When both interceptors are used, this `post_list_feature_groups_with_metadata` interceptor runs after the + `post_list_feature_groups` interceptor. The (possibly modified) response returned by + `post_list_feature_groups` will be passed to + `post_list_feature_groups_with_metadata`. + """ + return response, metadata + def pre_list_features( self, request: featurestore_service.ListFeaturesRequest, @@ -401,12 +592,38 @@ def post_list_features( ) -> featurestore_service.ListFeaturesResponse: """Post-rpc interceptor for list_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_list_features` interceptor runs + before the `post_list_features_with_metadata` interceptor. 
""" return response + def post_list_features_with_metadata( + self, + response: featurestore_service.ListFeaturesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListFeaturesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_list_features_with_metadata` + interceptor in new development instead of the `post_list_features` interceptor. + When both interceptors are used, this `post_list_features_with_metadata` interceptor runs after the + `post_list_features` interceptor. The (possibly modified) response returned by + `post_list_features` will be passed to + `post_list_features_with_metadata`. + """ + return response, metadata + def pre_update_feature( self, request: featurestore_service.UpdateFeatureRequest, @@ -427,12 +644,35 @@ def post_update_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature` interceptor runs + before the `post_update_feature_with_metadata` interceptor. 
""" return response + def post_update_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_update_feature_with_metadata` + interceptor in new development instead of the `post_update_feature` interceptor. + When both interceptors are used, this `post_update_feature_with_metadata` interceptor runs after the + `post_update_feature` interceptor. The (possibly modified) response returned by + `post_update_feature` will be passed to + `post_update_feature_with_metadata`. + """ + return response, metadata + def pre_update_feature_group( self, request: feature_registry_service.UpdateFeatureGroupRequest, @@ -453,12 +693,35 @@ def post_update_feature_group( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature_group` interceptor runs + before the `post_update_feature_group_with_metadata` interceptor. 
""" return response + def post_update_feature_group_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_update_feature_group_with_metadata` + interceptor in new development instead of the `post_update_feature_group` interceptor. + When both interceptors are used, this `post_update_feature_group_with_metadata` interceptor runs after the + `post_update_feature_group` interceptor. The (possibly modified) response returned by + `post_update_feature_group` will be passed to + `post_update_feature_group_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -749,9 +1012,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -962,6 +1226,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1098,6 +1366,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1106,6 +1378,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1146,6 +1426,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1308,6 +1592,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + 
{ + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1336,6 +1624,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1448,6 +1740,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1456,6 +1752,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1516,6 +1820,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1678,6 +1986,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1718,6 +2030,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1830,6 +2146,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1838,6 +2158,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1898,6 +2226,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2056,6 +2388,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2096,6 +2432,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2208,6 +2548,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2228,6 +2576,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2276,6 +2628,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2442,6 +2798,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2474,6 +2834,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2582,6 +2946,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2590,6 +2958,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2650,6 +3026,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2760,7 +3140,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2802,6 +3182,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_create_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_create_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2914,7 +3298,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2954,6 +3338,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3066,7 +3454,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3108,6 +3496,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3215,7 +3607,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3254,6 +3646,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3361,7 +3757,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3402,6 +3798,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3550,6 +3950,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3697,6 +4101,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] + resp, _ = self._interceptor.post_get_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3846,6 +4254,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_feature_groups(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_feature_groups_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3999,6 +4411,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4113,7 +4529,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4153,6 +4569,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4265,7 +4685,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4307,6 +4727,10 @@ def __call__( 
json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4482,7 +4906,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4625,7 +5048,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4768,7 +5190,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4912,7 +5333,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -5060,7 +5480,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -5207,7 +5626,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -5326,7 +5744,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. 
Args: @@ -5445,7 +5862,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -5588,7 +6004,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5733,7 +6148,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py index d55fd66166..b029900a45 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -84,6 +84,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncFeatureRegistryServiceRestInterceptor: """Asynchronous Interceptor for FeatureRegistryService. 
@@ -214,12 +217,35 @@ async def post_batch_create_features( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_create_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_create_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_batch_create_features` interceptor runs + before the `post_batch_create_features_with_metadata` interceptor. """ return response + async def post_batch_create_features_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_create_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_batch_create_features_with_metadata` + interceptor in new development instead of the `post_batch_create_features` interceptor. + When both interceptors are used, this `post_batch_create_features_with_metadata` interceptor runs after the + `post_batch_create_features` interceptor. The (possibly modified) response returned by + `post_batch_create_features` will be passed to + `post_batch_create_features_with_metadata`. + """ + return response, metadata + async def pre_create_feature( self, request: featurestore_service.CreateFeatureRequest, @@ -240,12 +266,35 @@ async def post_create_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature` interceptor runs + before the `post_create_feature_with_metadata` interceptor. """ return response + async def post_create_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_create_feature_with_metadata` + interceptor in new development instead of the `post_create_feature` interceptor. + When both interceptors are used, this `post_create_feature_with_metadata` interceptor runs after the + `post_create_feature` interceptor. The (possibly modified) response returned by + `post_create_feature` will be passed to + `post_create_feature_with_metadata`. + """ + return response, metadata + async def pre_create_feature_group( self, request: feature_registry_service.CreateFeatureGroupRequest, @@ -266,12 +315,35 @@ async def post_create_feature_group( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature_group` interceptor runs + before the `post_create_feature_group_with_metadata` interceptor. 
""" return response + async def post_create_feature_group_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_create_feature_group_with_metadata` + interceptor in new development instead of the `post_create_feature_group` interceptor. + When both interceptors are used, this `post_create_feature_group_with_metadata` interceptor runs after the + `post_create_feature_group` interceptor. The (possibly modified) response returned by + `post_create_feature_group` will be passed to + `post_create_feature_group_with_metadata`. + """ + return response, metadata + async def pre_delete_feature( self, request: featurestore_service.DeleteFeatureRequest, @@ -292,12 +364,35 @@ async def post_delete_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature` interceptor runs + before the `post_delete_feature_with_metadata` interceptor. 
""" return response + async def post_delete_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_with_metadata` + interceptor in new development instead of the `post_delete_feature` interceptor. + When both interceptors are used, this `post_delete_feature_with_metadata` interceptor runs after the + `post_delete_feature` interceptor. The (possibly modified) response returned by + `post_delete_feature` will be passed to + `post_delete_feature_with_metadata`. + """ + return response, metadata + async def pre_delete_feature_group( self, request: feature_registry_service.DeleteFeatureGroupRequest, @@ -318,12 +413,35 @@ async def post_delete_feature_group( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_group` interceptor runs + before the `post_delete_feature_group_with_metadata` interceptor. 
""" return response + async def post_delete_feature_group_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_group_with_metadata` + interceptor in new development instead of the `post_delete_feature_group` interceptor. + When both interceptors are used, this `post_delete_feature_group_with_metadata` interceptor runs after the + `post_delete_feature_group` interceptor. The (possibly modified) response returned by + `post_delete_feature_group` will be passed to + `post_delete_feature_group_with_metadata`. + """ + return response, metadata + async def pre_get_feature( self, request: featurestore_service.GetFeatureRequest, @@ -341,12 +459,35 @@ async def pre_get_feature( async def post_get_feature(self, response: feature.Feature) -> feature.Feature: """Post-rpc interceptor for get_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature` interceptor runs + before the `post_get_feature_with_metadata` interceptor. 
""" return response + async def post_get_feature_with_metadata( + self, + response: feature.Feature, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature.Feature, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_get_feature_with_metadata` + interceptor in new development instead of the `post_get_feature` interceptor. + When both interceptors are used, this `post_get_feature_with_metadata` interceptor runs after the + `post_get_feature` interceptor. The (possibly modified) response returned by + `post_get_feature` will be passed to + `post_get_feature_with_metadata`. + """ + return response, metadata + async def pre_get_feature_group( self, request: feature_registry_service.GetFeatureGroupRequest, @@ -367,12 +508,35 @@ async def post_get_feature_group( ) -> feature_group.FeatureGroup: """Post-rpc interceptor for get_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature_group` interceptor runs + before the `post_get_feature_group_with_metadata` interceptor. 
""" return response + async def post_get_feature_group_with_metadata( + self, + response: feature_group.FeatureGroup, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature_group.FeatureGroup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_get_feature_group_with_metadata` + interceptor in new development instead of the `post_get_feature_group` interceptor. + When both interceptors are used, this `post_get_feature_group_with_metadata` interceptor runs after the + `post_get_feature_group` interceptor. The (possibly modified) response returned by + `post_get_feature_group` will be passed to + `post_get_feature_group_with_metadata`. + """ + return response, metadata + async def pre_list_feature_groups( self, request: feature_registry_service.ListFeatureGroupsRequest, @@ -393,12 +557,38 @@ async def post_list_feature_groups( ) -> feature_registry_service.ListFeatureGroupsResponse: """Post-rpc interceptor for list_feature_groups - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_feature_groups_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_list_feature_groups` interceptor runs + before the `post_list_feature_groups_with_metadata` interceptor. 
""" return response + async def post_list_feature_groups_with_metadata( + self, + response: feature_registry_service.ListFeatureGroupsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + feature_registry_service.ListFeatureGroupsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_feature_groups + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_list_feature_groups_with_metadata` + interceptor in new development instead of the `post_list_feature_groups` interceptor. + When both interceptors are used, this `post_list_feature_groups_with_metadata` interceptor runs after the + `post_list_feature_groups` interceptor. The (possibly modified) response returned by + `post_list_feature_groups` will be passed to + `post_list_feature_groups_with_metadata`. + """ + return response, metadata + async def pre_list_features( self, request: featurestore_service.ListFeaturesRequest, @@ -419,12 +609,38 @@ async def post_list_features( ) -> featurestore_service.ListFeaturesResponse: """Post-rpc interceptor for list_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_list_features` interceptor runs + before the `post_list_features_with_metadata` interceptor. 
""" return response + async def post_list_features_with_metadata( + self, + response: featurestore_service.ListFeaturesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListFeaturesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_list_features_with_metadata` + interceptor in new development instead of the `post_list_features` interceptor. + When both interceptors are used, this `post_list_features_with_metadata` interceptor runs after the + `post_list_features` interceptor. The (possibly modified) response returned by + `post_list_features` will be passed to + `post_list_features_with_metadata`. + """ + return response, metadata + async def pre_update_feature( self, request: featurestore_service.UpdateFeatureRequest, @@ -445,12 +661,35 @@ async def post_update_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature` interceptor runs + before the `post_update_feature_with_metadata` interceptor. 
""" return response + async def post_update_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_update_feature_with_metadata` + interceptor in new development instead of the `post_update_feature` interceptor. + When both interceptors are used, this `post_update_feature_with_metadata` interceptor runs after the + `post_update_feature` interceptor. The (possibly modified) response returned by + `post_update_feature` will be passed to + `post_update_feature_with_metadata`. + """ + return response, metadata + async def pre_update_feature_group( self, request: feature_registry_service.UpdateFeatureGroupRequest, @@ -471,12 +710,35 @@ async def post_update_feature_group( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_feature_group - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_group_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeatureRegistryService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature_group` interceptor runs + before the `post_update_feature_group_with_metadata` interceptor. 
""" return response + async def post_update_feature_group_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature_group + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeatureRegistryService server but before it is returned to user code. + + We recommend only using this `post_update_feature_group_with_metadata` + interceptor in new development instead of the `post_update_feature_group` interceptor. + When both interceptors are used, this `post_update_feature_group_with_metadata` interceptor runs after the + `post_update_feature_group` interceptor. The (possibly modified) response returned by + `post_update_feature_group` will be passed to + `post_update_feature_group_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -788,9 +1050,9 @@ def __init__( self._interceptor = interceptor or AsyncFeatureRegistryServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -998,7 +1260,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1045,6 +1307,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_create_features(resp) + 
response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_batch_create_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1160,7 +1426,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1207,6 +1473,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1320,7 +1590,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1367,6 +1637,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1477,7 +1751,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1523,6 +1797,10 @@ async def __call__( content = await response.read() 
json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1631,7 +1909,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1677,6 +1955,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1833,6 +2115,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1984,6 +2270,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): 
# pragma: NO COVER @@ -2137,6 +2427,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_feature_groups(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_feature_groups_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2298,6 +2592,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2415,7 +2713,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2462,6 +2760,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2575,7 +2877,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2622,6 +2924,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, 
ignore_unknown_fields=True) resp = await self._interceptor.post_update_feature_group(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_feature_group_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2817,6 +3123,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2953,6 +3263,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2961,6 +3275,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3001,6 +3323,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], 
"google.longrunning.Operations.DeleteOperation": [ { @@ -3163,6 +3489,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -3191,6 +3521,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -3303,6 +3637,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3311,6 +3649,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3371,6 +3717,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3533,6 +3883,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -3573,6 +3927,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -3685,6 +4043,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3693,6 +4055,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3753,6 +4123,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3911,6 +4285,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3951,6 +4329,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -4063,6 +4445,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4083,6 +4473,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -4131,6 +4525,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], 
"google.longrunning.Operations.WaitOperation": [ { @@ -4297,6 +4695,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -4329,6 +4731,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -4437,6 +4843,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -4445,6 +4855,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4505,6 +4923,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -4653,7 +5075,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4804,7 +5225,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4955,7 +5375,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -5107,7 +5526,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -5263,7 +5681,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -5414,7 +5831,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -5537,7 +5953,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5660,7 +6075,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. 
Args: @@ -5811,7 +6225,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5962,7 +6375,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py index da5d3f4387..8389f9ba83 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -788,6 +788,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -874,6 +878,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -952,6 +961,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1133,6 +1146,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1269,6 +1286,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1277,6 +1298,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1317,6 +1346,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1498,6 +1531,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1526,6 +1563,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1638,6 +1679,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1646,6 +1691,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1706,6 +1759,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1887,6 +1944,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1927,6 +1988,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2039,6 +2104,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2047,6 +2116,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2107,6 +2184,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2284,6 +2365,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2324,6 +2409,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2436,6 +2525,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2456,6 +2553,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2504,6 +2605,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2689,6 +2794,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2721,6 +2830,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2829,6 +2942,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2837,6 +2954,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2897,6 +3022,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py index 0fc0fbe9f7..2b01888ee2 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py index ca33dbb455..c3b462a6bf 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,6 +39,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -134,7 +135,12 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeaturestoreOnlineServingServiceAsyncClient: The constructed client. """ - return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func( + FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs + ) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -150,7 +156,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
""" - return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file @@ -295,21 +306,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "credentialsType": None, + } + ), ) async def read_feature_values( @@ -391,7 +404,10 @@ async def sample_read_feature_values(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -519,7 +535,10 @@ async def sample_streaming_read_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -657,7 +676,10 @@ async def sample_write_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([entity_type, payloads]) + flattened_params = [entity_type, payloads] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1414,5 +1436,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("FeaturestoreOnlineServingServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py index 4ad99e8df7..d0f66125ff 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -44,6 +46,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -97,14 +100,14 @@ class FeaturestoreOnlineServingServiceClientMeta(type): OrderedDict() ) # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport - _transport_registry[ - "grpc_asyncio" - ] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + _transport_registry["grpc_asyncio"] = ( + FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + ) _transport_registry["rest"] = FeaturestoreOnlineServingServiceRestTransport if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncFeaturestoreOnlineServingServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncFeaturestoreOnlineServingServiceRestTransport + ) def get_transport_class( cls, @@ -176,6 +179,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -365,12 +396,10 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = ( + FeaturestoreOnlineServingServiceClient._use_client_cert_effective() + ) use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -378,7 +407,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -410,20 +439,16 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = ( + FeaturestoreOnlineServingServiceClient._use_client_cert_effective() + ) use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -516,6 +541,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -608,11 +660,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = FeaturestoreOnlineServingServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + FeaturestoreOnlineServingServiceClient._read_environment_variables() + ) self._client_cert_source = ( FeaturestoreOnlineServingServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert @@ -733,21 +783,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "credentialsType": None, + } + ), ) def read_feature_values( @@ -829,7 +883,10 @@ def sample_read_feature_values(): # Create or coerce a 
protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -952,7 +1009,10 @@ def sample_streaming_read_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1089,7 +1149,10 @@ def sample_write_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type, payloads]) + flattened_params = [entity_type, payloads] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1192,16 +1255,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1247,16 +1314,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1419,16 +1490,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1540,16 +1615,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1662,16 +1741,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1722,16 +1805,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1777,16 +1864,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1832,21 +1923,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("FeaturestoreOnlineServingServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py index e921defc84..c2b0c85800 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -42,14 +42,14 @@ OrderedDict() ) # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport -_transport_registry[ - "grpc_asyncio" -] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport +_transport_registry["grpc_asyncio"] = ( + FeaturestoreOnlineServingServiceGrpcAsyncIOTransport +) _transport_registry["rest"] = FeaturestoreOnlineServingServiceRestTransport if HAS_REST_ASYNC: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncFeaturestoreOnlineServingServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncFeaturestoreOnlineServingServiceRestTransport + ) __all__ = ( "FeaturestoreOnlineServingServiceTransport", diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py index aecc84b64d..3a28268832 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import featurestore_online_service from google.cloud.location import locations_pb2 # type: ignore @@ -36,6 +37,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeaturestoreOnlineServingServiceTransport(abc.ABC): """Abstract transport class for FeaturestoreOnlineServingService.""" @@ -67,9 +71,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -82,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -98,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -269,13 +277,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py index 44390159be..a08a6a6c08 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -74,12 +74,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -158,9 +157,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -293,9 +293,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -383,12 +384,12 @@ def streaming_read_feature_values( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "streaming_read_feature_values" not in self._stubs: - self._stubs[ - "streaming_read_feature_values" - ] = self._logged_channel.unary_stream( - "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + self._stubs["streaming_read_feature_values"] = ( + self._logged_channel.unary_stream( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) ) return self._stubs["streaming_read_feature_values"] diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py index 9756b18ae0..b4018bedea 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -154,8 +154,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -206,9 +207,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -390,12 +392,12 @@ def streaming_read_feature_values( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "streaming_read_feature_values" not in self._stubs: - self._stubs[ - "streaming_read_feature_values" - ] = self._logged_channel.unary_stream( - "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + self._stubs["streaming_read_feature_values"] = ( + self._logged_channel.unary_stream( + "/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues", + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) ) return self._stubs["streaming_read_feature_values"] diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py index 250186b13a..ebf9b0d601 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -62,6 +63,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeaturestoreOnlineServingServiceRestInterceptor: """Interceptor for FeaturestoreOnlineServingService. @@ -128,12 +132,38 @@ def post_read_feature_values( ) -> featurestore_online_service.ReadFeatureValuesResponse: """Post-rpc interceptor for read_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreOnlineServingService server but before - it is returned to user code. + it is returned to user code. This `post_read_feature_values` interceptor runs + before the `post_read_feature_values_with_metadata` interceptor. """ return response + def post_read_feature_values_with_metadata( + self, + response: featurestore_online_service.ReadFeatureValuesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_online_service.ReadFeatureValuesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for read_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreOnlineServingService server but before it is returned to user code. + + We recommend only using this `post_read_feature_values_with_metadata` + interceptor in new development instead of the `post_read_feature_values` interceptor. 
+ When both interceptors are used, this `post_read_feature_values_with_metadata` interceptor runs after the + `post_read_feature_values` interceptor. The (possibly modified) response returned by + `post_read_feature_values` will be passed to + `post_read_feature_values_with_metadata`. + """ + return response, metadata + def pre_streaming_read_feature_values( self, request: featurestore_online_service.StreamingReadFeatureValuesRequest, @@ -154,12 +184,37 @@ def post_streaming_read_feature_values( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for streaming_read_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_streaming_read_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreOnlineServingService server but before - it is returned to user code. + it is returned to user code. This `post_streaming_read_feature_values` interceptor runs + before the `post_streaming_read_feature_values_with_metadata` interceptor. """ return response + def post_streaming_read_feature_values_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for streaming_read_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreOnlineServingService server but before it is returned to user code. + + We recommend only using this `post_streaming_read_feature_values_with_metadata` + interceptor in new development instead of the `post_streaming_read_feature_values` interceptor. + When both interceptors are used, this `post_streaming_read_feature_values_with_metadata` interceptor runs after the + `post_streaming_read_feature_values` interceptor. 
The (possibly modified) response returned by + `post_streaming_read_feature_values` will be passed to + `post_streaming_read_feature_values_with_metadata`. + """ + return response, metadata + def pre_write_feature_values( self, request: featurestore_online_service.WriteFeatureValuesRequest, @@ -180,12 +235,38 @@ def post_write_feature_values( ) -> featurestore_online_service.WriteFeatureValuesResponse: """Post-rpc interceptor for write_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_write_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreOnlineServingService server but before - it is returned to user code. + it is returned to user code. This `post_write_feature_values` interceptor runs + before the `post_write_feature_values_with_metadata` interceptor. """ return response + def post_write_feature_values_with_metadata( + self, + response: featurestore_online_service.WriteFeatureValuesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_online_service.WriteFeatureValuesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for write_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreOnlineServingService server but before it is returned to user code. + + We recommend only using this `post_write_feature_values_with_metadata` + interceptor in new development instead of the `post_write_feature_values` interceptor. + When both interceptors are used, this `post_write_feature_values_with_metadata` interceptor runs after the + `post_write_feature_values` interceptor. The (possibly modified) response returned by + `post_write_feature_values` will be passed to + `post_write_feature_values_with_metadata`. 
+ """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -477,9 +558,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -652,6 +734,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_read_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -812,6 +898,28 @@ def __call__( ) resp = self._interceptor.post_streaming_read_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_streaming_read_feature_values_with_metadata( + resp, response_metadata + ) + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", + extra={ + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "rpcName": "StreamingReadFeatureValues", + 
"metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _WriteFeatureValues( @@ -946,6 +1054,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_write_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_write_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1048,7 +1160,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -1191,7 +1302,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1334,7 +1444,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1478,7 +1587,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1628,7 +1736,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1773,7 +1880,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1890,7 +1996,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. 
Args: @@ -2007,7 +2112,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -2150,7 +2254,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2293,7 +2396,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py index 0d580bd02a..98d0566dcc 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,7 +36,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -79,6 +79,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncFeaturestoreOnlineServingServiceRestInterceptor: """Asynchronous Interceptor for FeaturestoreOnlineServingService. 
@@ -145,12 +148,38 @@ async def post_read_feature_values( ) -> featurestore_online_service.ReadFeatureValuesResponse: """Post-rpc interceptor for read_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreOnlineServingService server but before - it is returned to user code. + it is returned to user code. This `post_read_feature_values` interceptor runs + before the `post_read_feature_values_with_metadata` interceptor. """ return response + async def post_read_feature_values_with_metadata( + self, + response: featurestore_online_service.ReadFeatureValuesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_online_service.ReadFeatureValuesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for read_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreOnlineServingService server but before it is returned to user code. + + We recommend only using this `post_read_feature_values_with_metadata` + interceptor in new development instead of the `post_read_feature_values` interceptor. + When both interceptors are used, this `post_read_feature_values_with_metadata` interceptor runs after the + `post_read_feature_values` interceptor. The (possibly modified) response returned by + `post_read_feature_values` will be passed to + `post_read_feature_values_with_metadata`. 
+ """ + return response, metadata + async def pre_streaming_read_feature_values( self, request: featurestore_online_service.StreamingReadFeatureValuesRequest, @@ -171,12 +200,38 @@ async def post_streaming_read_feature_values( ) -> rest_streaming_async.AsyncResponseIterator: """Post-rpc interceptor for streaming_read_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_streaming_read_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreOnlineServingService server but before - it is returned to user code. + it is returned to user code. This `post_streaming_read_feature_values` interceptor runs + before the `post_streaming_read_feature_values_with_metadata` interceptor. """ return response + async def post_streaming_read_feature_values_with_metadata( + self, + response: rest_streaming_async.AsyncResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming_async.AsyncResponseIterator, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for streaming_read_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreOnlineServingService server but before it is returned to user code. + + We recommend only using this `post_streaming_read_feature_values_with_metadata` + interceptor in new development instead of the `post_streaming_read_feature_values` interceptor. + When both interceptors are used, this `post_streaming_read_feature_values_with_metadata` interceptor runs after the + `post_streaming_read_feature_values` interceptor. The (possibly modified) response returned by + `post_streaming_read_feature_values` will be passed to + `post_streaming_read_feature_values_with_metadata`. 
+ """ + return response, metadata + async def pre_write_feature_values( self, request: featurestore_online_service.WriteFeatureValuesRequest, @@ -197,12 +252,38 @@ async def post_write_feature_values( ) -> featurestore_online_service.WriteFeatureValuesResponse: """Post-rpc interceptor for write_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_write_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreOnlineServingService server but before - it is returned to user code. + it is returned to user code. This `post_write_feature_values` interceptor runs + before the `post_write_feature_values_with_metadata` interceptor. """ return response + async def post_write_feature_values_with_metadata( + self, + response: featurestore_online_service.WriteFeatureValuesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_online_service.WriteFeatureValuesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for write_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreOnlineServingService server but before it is returned to user code. + + We recommend only using this `post_write_feature_values_with_metadata` + interceptor in new development instead of the `post_write_feature_values` interceptor. + When both interceptors are used, this `post_write_feature_values_with_metadata` interceptor runs after the + `post_write_feature_values` interceptor. The (possibly modified) response returned by + `post_write_feature_values` will be passed to + `post_write_feature_values_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -730,6 +811,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_read_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_read_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -825,11 +910,10 @@ async def __call__( _BaseFeaturestoreOnlineServingServiceRestTransport._BaseStreamingReadFeatureValues._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_streaming_read_feature_values( - request, metadata + request, metadata = ( + await self._interceptor.pre_streaming_read_feature_values( + request, metadata + ) ) transcoded_request = _BaseFeaturestoreOnlineServingServiceRestTransport._BaseStreamingReadFeatureValues._get_transcoded_request( http_options, request @@ -898,6 +982,29 @@ async def __call__( response, featurestore_online_service.ReadFeatureValuesResponse ) resp = await self._interceptor.post_streaming_read_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_streaming_read_feature_values_with_metadata( + resp, response_metadata + ) + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", + extra={ + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreOnlineServingService", + "rpcName": "StreamingReadFeatureValues", + "metadata": 
http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _WriteFeatureValues( @@ -1037,6 +1144,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_write_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_write_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1136,7 +1247,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -1289,7 +1399,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1442,7 +1551,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1596,7 +1704,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1754,7 +1861,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1907,7 +2013,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -2032,7 +2137,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -2157,7 +2261,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -2310,7 +2413,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2463,7 +2565,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py index afe0164a7b..7cb6b9593c 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -395,6 +395,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -481,6 +485,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -559,6 +568,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -740,6 +753,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -876,6 +893,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -884,6 +905,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -924,6 +953,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1105,6 +1138,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1133,6 +1170,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1245,6 +1286,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1253,6 +1298,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1313,6 +1366,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1494,6 +1551,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1534,6 +1595,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1646,6 +1711,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1654,6 +1723,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1714,6 +1791,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1891,6 +1972,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1931,6 +2016,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2043,6 +2132,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2063,6 +2160,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2111,6 +2212,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2296,6 +2401,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2328,6 +2437,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2436,6 +2549,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2444,6 +2561,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2504,6 +2629,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py index 86a8086d22..c709802e45 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py index 1a66a83db8..ed619bd379 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.featurestore_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import entity_type @@ -61,9 +60,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore 
+import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport from .client import FeaturestoreServiceClient @@ -140,7 +141,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: FeaturestoreServiceAsyncClient: The constructed client. """ - return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + FeaturestoreServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(FeaturestoreServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -156,7 +160,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: FeaturestoreServiceAsyncClient: The constructed client. 
""" - return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + FeaturestoreServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -301,21 +308,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeaturestoreServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", + "credentialsType": None, + } + ), ) async def create_featurestore( @@ -418,7 +427,10 @@ async def sample_create_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, featurestore, featurestore_id]) + flattened_params = [parent, featurestore, featurestore_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -543,7 +555,10 @@ async def sample_get_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -658,7 +673,10 @@ async def sample_list_featurestores(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -779,10 +797,10 @@ async def sample_update_featurestore(): Updatable fields: - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - ``online_serving_config.scaling`` - - ``online_storage_ttl_days`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -808,7 +826,10 @@ async def sample_update_featurestore(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) + flattened_params = [featurestore, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -958,7 +979,10 @@ async def sample_delete_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1109,7 +1133,10 @@ async def sample_create_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type, entity_type_id]) + flattened_params = [parent, entity_type, entity_type_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1235,7 +1262,10 @@ async def sample_get_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1350,7 +1380,10 @@ async def sample_list_entity_types(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1467,16 +1500,16 @@ async def sample_update_entity_type(): Updatable fields: - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` - - ``monitoring_config.snapshot_analysis.staleness_days`` - - ``monitoring_config.import_features_analysis.state`` - - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - - ``monitoring_config.numerical_threshold_config.value`` - - ``monitoring_config.categorical_threshold_config.value`` - - ``offline_storage_ttl_days`` + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ 
-1502,7 +1535,10 @@ async def sample_update_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type, update_mask]) + flattened_params = [entity_type, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1643,7 +1679,10 @@ async def sample_delete_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1799,7 +1838,10 @@ async def sample_create_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature, feature_id]) + flattened_params = [parent, feature, feature_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1951,7 +1993,10 @@ async def sample_batch_create_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, requests]) + flattened_params = [parent, requests] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2075,7 +2120,10 @@ async def sample_get_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2194,7 +2242,10 @@ async def sample_list_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2314,12 +2365,12 @@ async def sample_update_feature(): Updatable fields: - - ``description`` - - ``labels`` - - ``disable_monitoring`` (Not supported for - FeatureRegistryService Feature) - - ``point_of_contact`` (Not supported for - FeaturestoreService FeatureStore) + - ``description`` + - ``labels`` + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2342,7 +2393,10 @@ async def sample_update_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) + flattened_params = [feature, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2474,7 +2528,10 @@ async def sample_delete_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2633,7 +2690,10 @@ async def sample_import_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2779,7 +2839,10 @@ async def sample_batch_read_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore]) + flattened_params = [featurestore] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2915,7 +2978,10 @@ async def sample_export_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3059,7 +3125,10 @@ async def sample_delete_feature_values(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3178,15 +3247,15 @@ async def sample_search_features(): to a sequence of words (i.e. tokens) for comparison. This is done by: - - Removing leading/trailing whitespace and tokenizing - the search value. Characters that are not one of - alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or - asterisk ``*`` are treated as delimiters for tokens. - ``*`` is treated as a wildcard that matches - characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. + - Removing leading/trailing whitespace and tokenizing + the search value. Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches characters + within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. A QUERY must be either a singular token or a phrase. A phrase is one or multiple words enclosed in double @@ -3196,47 +3265,47 @@ async def sample_search_features(): Supported FIELDs for field-restricted queries: - - ``feature_id`` - - ``description`` - - ``entity_type_id`` + - ``feature_id`` + - ``description`` + - ``entity_type_id`` Examples: - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature - with ID containing the substring ``foo*feature`` (eg. - ``foobarfeature``). 
- - ``feature_id: foo AND description: bar`` --> Matches - a Feature with ID containing the substring ``foo`` - and description containing the substring ``bar``. + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with + ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. Besides field queries, the following exact-match filters are supported. The exact-match filters do not support wildcards. Unlike field-restricted queries, exact-match filters are case-sensitive. - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as - key presence. - - ``featurestore_id``: Supports = comparisons. + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. Examples: - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label - with ``env`` as the key. 
+ - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. This corresponds to the ``query`` field on the ``request`` instance; if ``request`` is provided, this @@ -3261,7 +3330,10 @@ async def sample_search_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([location, query]) + flattened_params = [location, query] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4025,5 +4097,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("FeaturestoreServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py index ef45471379..dcf1e51c1b 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.featurestore_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import entity_type @@ -75,9 +76,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import FeaturestoreServiceGrpcTransport from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport @@ -179,6 +182,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client 
certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -416,12 +447,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = FeaturestoreServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -429,7 +456,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. 
client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -461,20 +488,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = FeaturestoreServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -567,6 +588,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -659,11 +707,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = FeaturestoreServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + FeaturestoreServiceClient._read_environment_variables() + ) self._client_cert_source = FeaturestoreServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -776,21 +822,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.FeaturestoreServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", 
"" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", + "credentialsType": None, + } + ), ) def create_featurestore( @@ -893,7 +943,10 @@ def sample_create_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore, featurestore_id]) + flattened_params = [parent, featurestore, featurestore_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1015,7 +1068,10 @@ def sample_get_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1127,7 +1183,10 @@ def sample_list_featurestores(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1245,10 +1304,10 @@ def sample_update_featurestore(): Updatable fields: - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - ``online_serving_config.scaling`` - - ``online_storage_ttl_days`` + - ``labels`` + - ``online_serving_config.fixed_node_count`` + - ``online_serving_config.scaling`` + - ``online_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1274,7 +1333,10 @@ def sample_update_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) + flattened_params = [featurestore, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1421,7 +1483,10 @@ def sample_delete_featurestore(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1569,7 +1634,10 @@ def sample_create_entity_type(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type, entity_type_id]) + flattened_params = [parent, entity_type, entity_type_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1692,7 +1760,10 @@ def sample_get_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1804,7 +1875,10 @@ def sample_list_entity_types(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1918,16 +1992,16 @@ def sample_update_entity_type(): Updatable fields: - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` - - ``monitoring_config.snapshot_analysis.staleness_days`` - - ``monitoring_config.import_features_analysis.state`` - - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` - - ``monitoring_config.numerical_threshold_config.value`` - - ``monitoring_config.categorical_threshold_config.value`` - - ``offline_storage_ttl_days`` + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval_days`` + - ``monitoring_config.snapshot_analysis.staleness_days`` + - ``monitoring_config.import_features_analysis.state`` + - ``monitoring_config.import_features_analysis.anomaly_detection_baseline`` + - ``monitoring_config.numerical_threshold_config.value`` + - ``monitoring_config.categorical_threshold_config.value`` + - ``offline_storage_ttl_days`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1953,7 +2027,10 @@ def sample_update_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([entity_type, update_mask]) + flattened_params = [entity_type, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2091,7 +2168,10 @@ def sample_delete_entity_type(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) + flattened_params = [name, force] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2244,7 +2324,10 @@ def sample_create_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature, feature_id]) + flattened_params = [parent, feature, feature_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2393,7 +2476,10 @@ def sample_batch_create_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, requests]) + flattened_params = [parent, requests] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2514,7 +2600,10 @@ def sample_get_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2630,7 +2719,10 @@ def sample_list_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2747,12 +2839,12 @@ def sample_update_feature(): Updatable fields: - - ``description`` - - ``labels`` - - ``disable_monitoring`` (Not supported for - FeatureRegistryService Feature) - - ``point_of_contact`` (Not supported for - FeaturestoreService FeatureStore) + - ``description`` + - ``labels`` + - ``disable_monitoring`` (Not supported for + FeatureRegistryService Feature) + - ``point_of_contact`` (Not supported for + FeaturestoreService FeatureStore) This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -2775,7 +2867,10 @@ def sample_update_feature(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) + flattened_params = [feature, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2904,7 +2999,10 @@ def sample_delete_feature(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3060,7 +3158,10 @@ def sample_import_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3203,7 +3304,10 @@ def sample_batch_read_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([featurestore]) + flattened_params = [featurestore] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3338,7 +3442,10 @@ def sample_export_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3479,7 +3586,10 @@ def sample_delete_feature_values(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) + flattened_params = [entity_type] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3595,15 +3705,15 @@ def sample_search_features(): to a sequence of words (i.e. tokens) for comparison. This is done by: - - Removing leading/trailing whitespace and tokenizing - the search value. Characters that are not one of - alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or - asterisk ``*`` are treated as delimiters for tokens. - ``*`` is treated as a wildcard that matches - characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. + - Removing leading/trailing whitespace and tokenizing + the search value. 
Characters that are not one of + alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or + asterisk ``*`` are treated as delimiters for tokens. + ``*`` is treated as a wildcard that matches characters + within a token. + - Ignoring case. + - Prepending an asterisk to the first and appending an + asterisk to the last token in QUERY. A QUERY must be either a singular token or a phrase. A phrase is one or multiple words enclosed in double @@ -3613,47 +3723,47 @@ def sample_search_features(): Supported FIELDs for field-restricted queries: - - ``feature_id`` - - ``description`` - - ``entity_type_id`` + - ``feature_id`` + - ``description`` + - ``entity_type_id`` Examples: - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature - with ID containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches - a Feature with ID containing the substring ``foo`` - and description containing the substring ``bar``. + - ``feature_id: foo`` --> Matches a Feature with ID + containing the substring ``foo`` (eg. ``foo``, + ``foofeature``, ``barfoo``). + - ``feature_id: foo*feature`` --> Matches a Feature with + ID containing the substring ``foo*feature`` (eg. + ``foobarfeature``). + - ``feature_id: foo AND description: bar`` --> Matches a + Feature with ID containing the substring ``foo`` and + description containing the substring ``bar``. Besides field queries, the following exact-match filters are supported. The exact-match filters do not support wildcards. Unlike field-restricted queries, exact-match filters are case-sensitive. - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. 
- - ``labels``: Supports key-value equality as well as - key presence. - - ``featurestore_id``: Supports = comparisons. + - ``feature_id``: Supports = comparisons. + - ``description``: Supports = comparisons. Multi-token + filters should be enclosed in quotes. + - ``entity_type_id``: Supports = comparisons. + - ``value_type``: Supports = and != comparisons. + - ``labels``: Supports key-value equality as well as key + presence. + - ``featurestore_id``: Supports = comparisons. Examples: - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label - with ``env`` as the key. + - ``description = "foo bar"`` --> Any Feature with + description exactly equal to ``foo bar`` + - ``value_type = DOUBLE`` --> Features whose type is + DOUBLE. + - ``labels.active = yes AND labels.env = prod`` --> + Features having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any Feature which has a label + with ``env`` as the key. This corresponds to the ``query`` field on the ``request`` instance; if ``request`` is provided, this @@ -3678,7 +3788,10 @@ def sample_search_features(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([location, query]) + flattened_params = [location, query] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3788,16 +3901,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -3843,16 +3960,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -4015,16 +4136,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -4136,16 +4261,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -4258,16 +4387,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -4318,16 +4451,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -4373,16 +4510,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -4428,21 +4569,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("FeaturestoreServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py b/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py index bd1afb27a2..c3e82304f0 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py index c0f3af3029..81505467c8 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py index 51b282901b..c6c97f20ae 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import entity_type from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type @@ -42,6 +43,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeaturestoreServiceTransport(abc.ABC): """Abstract transport class for FeaturestoreService.""" @@ -73,9 +77,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
@@ -88,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -535,13 +543,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py index 1122b0636d..c0631d0614 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -80,12 +80,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.FeaturestoreService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -163,9 +162,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -299,9 +299,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py index 36c7c714c6..0b6ef8e817 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -159,8 +159,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -211,9 +212,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. 
+ This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py index ffadee70cf..d3e6aa42de 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -68,6 +69,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class FeaturestoreServiceRestInterceptor: """Interceptor for FeaturestoreService. @@ -278,12 +282,35 @@ def post_batch_create_features( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_create_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_create_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. 
This `post_batch_create_features` interceptor runs + before the `post_batch_create_features_with_metadata` interceptor. """ return response + def post_batch_create_features_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_create_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_batch_create_features_with_metadata` + interceptor in new development instead of the `post_batch_create_features` interceptor. + When both interceptors are used, this `post_batch_create_features_with_metadata` interceptor runs after the + `post_batch_create_features` interceptor. The (possibly modified) response returned by + `post_batch_create_features` will be passed to + `post_batch_create_features_with_metadata`. + """ + return response, metadata + def pre_batch_read_feature_values( self, request: featurestore_service.BatchReadFeatureValuesRequest, @@ -304,12 +331,35 @@ def post_batch_read_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_read_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_read_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_batch_read_feature_values` interceptor runs + before the `post_batch_read_feature_values_with_metadata` interceptor. 
""" return response + def post_batch_read_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_read_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_batch_read_feature_values_with_metadata` + interceptor in new development instead of the `post_batch_read_feature_values` interceptor. + When both interceptors are used, this `post_batch_read_feature_values_with_metadata` interceptor runs after the + `post_batch_read_feature_values` interceptor. The (possibly modified) response returned by + `post_batch_read_feature_values` will be passed to + `post_batch_read_feature_values_with_metadata`. + """ + return response, metadata + def pre_create_entity_type( self, request: featurestore_service.CreateEntityTypeRequest, @@ -330,12 +380,35 @@ def post_create_entity_type( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_create_entity_type` interceptor runs + before the `post_create_entity_type_with_metadata` interceptor. 
""" return response + def post_create_entity_type_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_create_entity_type_with_metadata` + interceptor in new development instead of the `post_create_entity_type` interceptor. + When both interceptors are used, this `post_create_entity_type_with_metadata` interceptor runs after the + `post_create_entity_type` interceptor. The (possibly modified) response returned by + `post_create_entity_type` will be passed to + `post_create_entity_type_with_metadata`. + """ + return response, metadata + def pre_create_feature( self, request: featurestore_service.CreateFeatureRequest, @@ -356,12 +429,35 @@ def post_create_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature` interceptor runs + before the `post_create_feature_with_metadata` interceptor. 
""" return response + def post_create_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_create_feature_with_metadata` + interceptor in new development instead of the `post_create_feature` interceptor. + When both interceptors are used, this `post_create_feature_with_metadata` interceptor runs after the + `post_create_feature` interceptor. The (possibly modified) response returned by + `post_create_feature` will be passed to + `post_create_feature_with_metadata`. + """ + return response, metadata + def pre_create_featurestore( self, request: featurestore_service.CreateFeaturestoreRequest, @@ -382,12 +478,35 @@ def post_create_featurestore( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_create_featurestore` interceptor runs + before the `post_create_featurestore_with_metadata` interceptor. 
""" return response + def post_create_featurestore_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_create_featurestore_with_metadata` + interceptor in new development instead of the `post_create_featurestore` interceptor. + When both interceptors are used, this `post_create_featurestore_with_metadata` interceptor runs after the + `post_create_featurestore` interceptor. The (possibly modified) response returned by + `post_create_featurestore` will be passed to + `post_create_featurestore_with_metadata`. + """ + return response, metadata + def pre_delete_entity_type( self, request: featurestore_service.DeleteEntityTypeRequest, @@ -408,12 +527,35 @@ def post_delete_entity_type( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_entity_type` interceptor runs + before the `post_delete_entity_type_with_metadata` interceptor. 
""" return response + def post_delete_entity_type_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_entity_type_with_metadata` + interceptor in new development instead of the `post_delete_entity_type` interceptor. + When both interceptors are used, this `post_delete_entity_type_with_metadata` interceptor runs after the + `post_delete_entity_type` interceptor. The (possibly modified) response returned by + `post_delete_entity_type` will be passed to + `post_delete_entity_type_with_metadata`. + """ + return response, metadata + def pre_delete_feature( self, request: featurestore_service.DeleteFeatureRequest, @@ -434,12 +576,35 @@ def post_delete_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature` interceptor runs + before the `post_delete_feature_with_metadata` interceptor. 
""" return response + def post_delete_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_with_metadata` + interceptor in new development instead of the `post_delete_feature` interceptor. + When both interceptors are used, this `post_delete_feature_with_metadata` interceptor runs after the + `post_delete_feature` interceptor. The (possibly modified) response returned by + `post_delete_feature` will be passed to + `post_delete_feature_with_metadata`. + """ + return response, metadata + def pre_delete_featurestore( self, request: featurestore_service.DeleteFeaturestoreRequest, @@ -460,12 +625,35 @@ def post_delete_featurestore( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_featurestore` interceptor runs + before the `post_delete_featurestore_with_metadata` interceptor. 
""" return response + def post_delete_featurestore_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_featurestore_with_metadata` + interceptor in new development instead of the `post_delete_featurestore` interceptor. + When both interceptors are used, this `post_delete_featurestore_with_metadata` interceptor runs after the + `post_delete_featurestore` interceptor. The (possibly modified) response returned by + `post_delete_featurestore` will be passed to + `post_delete_featurestore_with_metadata`. + """ + return response, metadata + def pre_delete_feature_values( self, request: featurestore_service.DeleteFeatureValuesRequest, @@ -486,12 +674,35 @@ def post_delete_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_values` interceptor runs + before the `post_delete_feature_values_with_metadata` interceptor. 
""" return response + def post_delete_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_values_with_metadata` + interceptor in new development instead of the `post_delete_feature_values` interceptor. + When both interceptors are used, this `post_delete_feature_values_with_metadata` interceptor runs after the + `post_delete_feature_values` interceptor. The (possibly modified) response returned by + `post_delete_feature_values` will be passed to + `post_delete_feature_values_with_metadata`. + """ + return response, metadata + def pre_export_feature_values( self, request: featurestore_service.ExportFeatureValuesRequest, @@ -512,12 +723,35 @@ def post_export_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for export_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_export_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_export_feature_values` interceptor runs + before the `post_export_feature_values_with_metadata` interceptor. 
""" return response + def post_export_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for export_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_export_feature_values_with_metadata` + interceptor in new development instead of the `post_export_feature_values` interceptor. + When both interceptors are used, this `post_export_feature_values_with_metadata` interceptor runs after the + `post_export_feature_values` interceptor. The (possibly modified) response returned by + `post_export_feature_values` will be passed to + `post_export_feature_values_with_metadata`. + """ + return response, metadata + def pre_get_entity_type( self, request: featurestore_service.GetEntityTypeRequest, @@ -538,12 +772,35 @@ def post_get_entity_type( ) -> entity_type.EntityType: """Post-rpc interceptor for get_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_get_entity_type` interceptor runs + before the `post_get_entity_type_with_metadata` interceptor. 
""" return response + def post_get_entity_type_with_metadata( + self, + response: entity_type.EntityType, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[entity_type.EntityType, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_get_entity_type_with_metadata` + interceptor in new development instead of the `post_get_entity_type` interceptor. + When both interceptors are used, this `post_get_entity_type_with_metadata` interceptor runs after the + `post_get_entity_type` interceptor. The (possibly modified) response returned by + `post_get_entity_type` will be passed to + `post_get_entity_type_with_metadata`. + """ + return response, metadata + def pre_get_feature( self, request: featurestore_service.GetFeatureRequest, @@ -561,12 +818,35 @@ def pre_get_feature( def post_get_feature(self, response: feature.Feature) -> feature.Feature: """Post-rpc interceptor for get_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature` interceptor runs + before the `post_get_feature_with_metadata` interceptor. 
""" return response + def post_get_feature_with_metadata( + self, + response: feature.Feature, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature.Feature, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_get_feature_with_metadata` + interceptor in new development instead of the `post_get_feature` interceptor. + When both interceptors are used, this `post_get_feature_with_metadata` interceptor runs after the + `post_get_feature` interceptor. The (possibly modified) response returned by + `post_get_feature` will be passed to + `post_get_feature_with_metadata`. + """ + return response, metadata + def pre_get_featurestore( self, request: featurestore_service.GetFeaturestoreRequest, @@ -587,12 +867,35 @@ def post_get_featurestore( ) -> featurestore.Featurestore: """Post-rpc interceptor for get_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_get_featurestore` interceptor runs + before the `post_get_featurestore_with_metadata` interceptor. """ return response + def post_get_featurestore_with_metadata( + self, + response: featurestore.Featurestore, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[featurestore.Featurestore, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. 
+ + We recommend only using this `post_get_featurestore_with_metadata` + interceptor in new development instead of the `post_get_featurestore` interceptor. + When both interceptors are used, this `post_get_featurestore_with_metadata` interceptor runs after the + `post_get_featurestore` interceptor. The (possibly modified) response returned by + `post_get_featurestore` will be passed to + `post_get_featurestore_with_metadata`. + """ + return response, metadata + def pre_import_feature_values( self, request: featurestore_service.ImportFeatureValuesRequest, @@ -613,12 +916,35 @@ def post_import_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for import_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_import_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_import_feature_values` interceptor runs + before the `post_import_feature_values_with_metadata` interceptor. """ return response + def post_import_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for import_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_import_feature_values_with_metadata` + interceptor in new development instead of the `post_import_feature_values` interceptor. + When both interceptors are used, this `post_import_feature_values_with_metadata` interceptor runs after the + `post_import_feature_values` interceptor. 
The (possibly modified) response returned by + `post_import_feature_values` will be passed to + `post_import_feature_values_with_metadata`. + """ + return response, metadata + def pre_list_entity_types( self, request: featurestore_service.ListEntityTypesRequest, @@ -639,12 +965,38 @@ def post_list_entity_types( ) -> featurestore_service.ListEntityTypesResponse: """Post-rpc interceptor for list_entity_types - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_entity_types_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_list_entity_types` interceptor runs + before the `post_list_entity_types_with_metadata` interceptor. """ return response + def post_list_entity_types_with_metadata( + self, + response: featurestore_service.ListEntityTypesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListEntityTypesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_entity_types + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_list_entity_types_with_metadata` + interceptor in new development instead of the `post_list_entity_types` interceptor. + When both interceptors are used, this `post_list_entity_types_with_metadata` interceptor runs after the + `post_list_entity_types` interceptor. The (possibly modified) response returned by + `post_list_entity_types` will be passed to + `post_list_entity_types_with_metadata`. 
+ """ + return response, metadata + def pre_list_features( self, request: featurestore_service.ListFeaturesRequest, @@ -665,12 +1017,38 @@ def post_list_features( ) -> featurestore_service.ListFeaturesResponse: """Post-rpc interceptor for list_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_list_features` interceptor runs + before the `post_list_features_with_metadata` interceptor. """ return response + def post_list_features_with_metadata( + self, + response: featurestore_service.ListFeaturesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListFeaturesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_list_features_with_metadata` + interceptor in new development instead of the `post_list_features` interceptor. + When both interceptors are used, this `post_list_features_with_metadata` interceptor runs after the + `post_list_features` interceptor. The (possibly modified) response returned by + `post_list_features` will be passed to + `post_list_features_with_metadata`. + """ + return response, metadata + def pre_list_featurestores( self, request: featurestore_service.ListFeaturestoresRequest, @@ -691,12 +1069,38 @@ def post_list_featurestores( ) -> featurestore_service.ListFeaturestoresResponse: """Post-rpc interceptor for list_featurestores - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_list_featurestores_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_list_featurestores` interceptor runs + before the `post_list_featurestores_with_metadata` interceptor. """ return response + def post_list_featurestores_with_metadata( + self, + response: featurestore_service.ListFeaturestoresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListFeaturestoresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_featurestores + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_list_featurestores_with_metadata` + interceptor in new development instead of the `post_list_featurestores` interceptor. + When both interceptors are used, this `post_list_featurestores_with_metadata` interceptor runs after the + `post_list_featurestores` interceptor. The (possibly modified) response returned by + `post_list_featurestores` will be passed to + `post_list_featurestores_with_metadata`. + """ + return response, metadata + def pre_search_features( self, request: featurestore_service.SearchFeaturesRequest, @@ -717,12 +1121,38 @@ def post_search_features( ) -> featurestore_service.SearchFeaturesResponse: """Post-rpc interceptor for search_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. 
This `post_search_features` interceptor runs + before the `post_search_features_with_metadata` interceptor. """ return response + def post_search_features_with_metadata( + self, + response: featurestore_service.SearchFeaturesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.SearchFeaturesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_search_features_with_metadata` + interceptor in new development instead of the `post_search_features` interceptor. + When both interceptors are used, this `post_search_features_with_metadata` interceptor runs after the + `post_search_features` interceptor. The (possibly modified) response returned by + `post_search_features` will be passed to + `post_search_features_with_metadata`. + """ + return response, metadata + def pre_update_entity_type( self, request: featurestore_service.UpdateEntityTypeRequest, @@ -743,12 +1173,35 @@ def post_update_entity_type( ) -> gca_entity_type.EntityType: """Post-rpc interceptor for update_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_update_entity_type` interceptor runs + before the `post_update_entity_type_with_metadata` interceptor. 
""" return response + def post_update_entity_type_with_metadata( + self, + response: gca_entity_type.EntityType, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_entity_type.EntityType, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_update_entity_type_with_metadata` + interceptor in new development instead of the `post_update_entity_type` interceptor. + When both interceptors are used, this `post_update_entity_type_with_metadata` interceptor runs after the + `post_update_entity_type` interceptor. The (possibly modified) response returned by + `post_update_entity_type` will be passed to + `post_update_entity_type_with_metadata`. + """ + return response, metadata + def pre_update_feature( self, request: featurestore_service.UpdateFeatureRequest, @@ -767,12 +1220,35 @@ def pre_update_feature( def post_update_feature(self, response: gca_feature.Feature) -> gca_feature.Feature: """Post-rpc interceptor for update_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature` interceptor runs + before the `post_update_feature_with_metadata` interceptor. 
""" return response + def post_update_feature_with_metadata( + self, + response: gca_feature.Feature, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_feature.Feature, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_update_feature_with_metadata` + interceptor in new development instead of the `post_update_feature` interceptor. + When both interceptors are used, this `post_update_feature_with_metadata` interceptor runs after the + `post_update_feature` interceptor. The (possibly modified) response returned by + `post_update_feature` will be passed to + `post_update_feature_with_metadata`. + """ + return response, metadata + def pre_update_featurestore( self, request: featurestore_service.UpdateFeaturestoreRequest, @@ -793,12 +1269,35 @@ def post_update_featurestore( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_update_featurestore` interceptor runs + before the `post_update_featurestore_with_metadata` interceptor. 
""" return response + def post_update_featurestore_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_update_featurestore_with_metadata` + interceptor in new development instead of the `post_update_featurestore` interceptor. + When both interceptors are used, this `post_update_featurestore_with_metadata` interceptor runs after the + `post_update_featurestore` interceptor. The (possibly modified) response returned by + `post_update_featurestore` will be passed to + `post_update_featurestore_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1089,9 +1588,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1302,6 +1802,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1438,6 +1942,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1446,6 +1954,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1486,6 +2002,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1648,6 +2168,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1676,6 +2200,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1788,6 +2316,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1796,6 +2328,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1856,6 +2396,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2018,6 +2562,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2058,6 +2606,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2170,6 +2722,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2178,6 +2734,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2238,6 +2802,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2396,6 +2964,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2436,6 +3008,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2548,6 +3124,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2568,6 +3152,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2616,6 +3204,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2782,6 +3374,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2814,6 +3410,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2922,6 +3522,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2930,6 +3534,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2990,6 +3602,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3100,7 +3716,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3142,6 +3758,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_create_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_create_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3254,7 +3874,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3296,6 +3916,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_read_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_read_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3408,7 +4032,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3448,6 +4072,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_entity_type_with_metadata( + resp, response_metadata + ) if 
CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3560,7 +4188,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3600,6 +4228,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3712,7 +4344,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3754,6 +4386,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3861,7 +4497,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3900,6 +4536,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4007,7 +4647,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4046,6 +4686,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4153,7 +4797,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4194,6 +4838,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4306,7 +4954,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4348,6 +4996,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG ): # pragma: NO COVER @@ -4460,7 +5112,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4502,6 +5154,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_export_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_export_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4651,6 +5307,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4799,6 +5459,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4949,6 +5613,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5061,7 +5729,7 @@ def __call__( ) 
method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5103,6 +5771,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_import_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_import_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5250,6 +5922,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_entity_types(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_entity_types_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5401,6 +6077,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5552,6 +6232,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_featurestores(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_featurestores_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5699,6 +6383,10 @@ def __call__( json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_search_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_search_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5858,6 +6546,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6012,6 +6704,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6124,7 +6820,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6166,6 +6862,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6439,7 +7139,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: 
- r"""Call the get location method over HTTP. Args: @@ -6582,7 +7281,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -6725,7 +7423,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -6869,7 +7566,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -7017,7 +7713,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -7164,7 +7859,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -7281,7 +7975,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -7398,7 +8091,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -7541,7 +8233,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -7684,7 +8375,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py index e7bdc1b6cc..c609f94c78 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -86,6 +86,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncFeaturestoreServiceRestInterceptor: """Asynchronous Interceptor for FeaturestoreService. @@ -296,12 +299,35 @@ async def post_batch_create_features( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_create_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_create_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_batch_create_features` interceptor runs + before the `post_batch_create_features_with_metadata` interceptor. 
""" return response + async def post_batch_create_features_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_create_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_batch_create_features_with_metadata` + interceptor in new development instead of the `post_batch_create_features` interceptor. + When both interceptors are used, this `post_batch_create_features_with_metadata` interceptor runs after the + `post_batch_create_features` interceptor. The (possibly modified) response returned by + `post_batch_create_features` will be passed to + `post_batch_create_features_with_metadata`. + """ + return response, metadata + async def pre_batch_read_feature_values( self, request: featurestore_service.BatchReadFeatureValuesRequest, @@ -322,12 +348,35 @@ async def post_batch_read_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_read_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_read_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_batch_read_feature_values` interceptor runs + before the `post_batch_read_feature_values_with_metadata` interceptor. 
""" return response + async def post_batch_read_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_read_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_batch_read_feature_values_with_metadata` + interceptor in new development instead of the `post_batch_read_feature_values` interceptor. + When both interceptors are used, this `post_batch_read_feature_values_with_metadata` interceptor runs after the + `post_batch_read_feature_values` interceptor. The (possibly modified) response returned by + `post_batch_read_feature_values` will be passed to + `post_batch_read_feature_values_with_metadata`. + """ + return response, metadata + async def pre_create_entity_type( self, request: featurestore_service.CreateEntityTypeRequest, @@ -348,12 +397,35 @@ async def post_create_entity_type( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_create_entity_type` interceptor runs + before the `post_create_entity_type_with_metadata` interceptor. 
""" return response + async def post_create_entity_type_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_create_entity_type_with_metadata` + interceptor in new development instead of the `post_create_entity_type` interceptor. + When both interceptors are used, this `post_create_entity_type_with_metadata` interceptor runs after the + `post_create_entity_type` interceptor. The (possibly modified) response returned by + `post_create_entity_type` will be passed to + `post_create_entity_type_with_metadata`. + """ + return response, metadata + async def pre_create_feature( self, request: featurestore_service.CreateFeatureRequest, @@ -374,12 +446,35 @@ async def post_create_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_create_feature` interceptor runs + before the `post_create_feature_with_metadata` interceptor. 
""" return response + async def post_create_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_create_feature_with_metadata` + interceptor in new development instead of the `post_create_feature` interceptor. + When both interceptors are used, this `post_create_feature_with_metadata` interceptor runs after the + `post_create_feature` interceptor. The (possibly modified) response returned by + `post_create_feature` will be passed to + `post_create_feature_with_metadata`. + """ + return response, metadata + async def pre_create_featurestore( self, request: featurestore_service.CreateFeaturestoreRequest, @@ -400,12 +495,35 @@ async def post_create_featurestore( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_create_featurestore` interceptor runs + before the `post_create_featurestore_with_metadata` interceptor. 
""" return response + async def post_create_featurestore_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_create_featurestore_with_metadata` + interceptor in new development instead of the `post_create_featurestore` interceptor. + When both interceptors are used, this `post_create_featurestore_with_metadata` interceptor runs after the + `post_create_featurestore` interceptor. The (possibly modified) response returned by + `post_create_featurestore` will be passed to + `post_create_featurestore_with_metadata`. + """ + return response, metadata + async def pre_delete_entity_type( self, request: featurestore_service.DeleteEntityTypeRequest, @@ -426,12 +544,35 @@ async def post_delete_entity_type( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_entity_type` interceptor runs + before the `post_delete_entity_type_with_metadata` interceptor. 
""" return response + async def post_delete_entity_type_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_entity_type_with_metadata` + interceptor in new development instead of the `post_delete_entity_type` interceptor. + When both interceptors are used, this `post_delete_entity_type_with_metadata` interceptor runs after the + `post_delete_entity_type` interceptor. The (possibly modified) response returned by + `post_delete_entity_type` will be passed to + `post_delete_entity_type_with_metadata`. + """ + return response, metadata + async def pre_delete_feature( self, request: featurestore_service.DeleteFeatureRequest, @@ -452,12 +593,35 @@ async def post_delete_feature( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature` interceptor runs + before the `post_delete_feature_with_metadata` interceptor. 
""" return response + async def post_delete_feature_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_with_metadata` + interceptor in new development instead of the `post_delete_feature` interceptor. + When both interceptors are used, this `post_delete_feature_with_metadata` interceptor runs after the + `post_delete_feature` interceptor. The (possibly modified) response returned by + `post_delete_feature` will be passed to + `post_delete_feature_with_metadata`. + """ + return response, metadata + async def pre_delete_featurestore( self, request: featurestore_service.DeleteFeaturestoreRequest, @@ -478,12 +642,35 @@ async def post_delete_featurestore( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_featurestore` interceptor runs + before the `post_delete_featurestore_with_metadata` interceptor. 
""" return response + async def post_delete_featurestore_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_featurestore_with_metadata` + interceptor in new development instead of the `post_delete_featurestore` interceptor. + When both interceptors are used, this `post_delete_featurestore_with_metadata` interceptor runs after the + `post_delete_featurestore` interceptor. The (possibly modified) response returned by + `post_delete_featurestore` will be passed to + `post_delete_featurestore_with_metadata`. + """ + return response, metadata + async def pre_delete_feature_values( self, request: featurestore_service.DeleteFeatureValuesRequest, @@ -504,12 +691,35 @@ async def post_delete_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_delete_feature_values` interceptor runs + before the `post_delete_feature_values_with_metadata` interceptor. 
""" return response + async def post_delete_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_delete_feature_values_with_metadata` + interceptor in new development instead of the `post_delete_feature_values` interceptor. + When both interceptors are used, this `post_delete_feature_values_with_metadata` interceptor runs after the + `post_delete_feature_values` interceptor. The (possibly modified) response returned by + `post_delete_feature_values` will be passed to + `post_delete_feature_values_with_metadata`. + """ + return response, metadata + async def pre_export_feature_values( self, request: featurestore_service.ExportFeatureValuesRequest, @@ -530,12 +740,35 @@ async def post_export_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for export_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_export_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_export_feature_values` interceptor runs + before the `post_export_feature_values_with_metadata` interceptor. 
""" return response + async def post_export_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for export_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_export_feature_values_with_metadata` + interceptor in new development instead of the `post_export_feature_values` interceptor. + When both interceptors are used, this `post_export_feature_values_with_metadata` interceptor runs after the + `post_export_feature_values` interceptor. The (possibly modified) response returned by + `post_export_feature_values` will be passed to + `post_export_feature_values_with_metadata`. + """ + return response, metadata + async def pre_get_entity_type( self, request: featurestore_service.GetEntityTypeRequest, @@ -556,12 +789,35 @@ async def post_get_entity_type( ) -> entity_type.EntityType: """Post-rpc interceptor for get_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_get_entity_type` interceptor runs + before the `post_get_entity_type_with_metadata` interceptor. 
""" return response + async def post_get_entity_type_with_metadata( + self, + response: entity_type.EntityType, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[entity_type.EntityType, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_get_entity_type_with_metadata` + interceptor in new development instead of the `post_get_entity_type` interceptor. + When both interceptors are used, this `post_get_entity_type_with_metadata` interceptor runs after the + `post_get_entity_type` interceptor. The (possibly modified) response returned by + `post_get_entity_type` will be passed to + `post_get_entity_type_with_metadata`. + """ + return response, metadata + async def pre_get_feature( self, request: featurestore_service.GetFeatureRequest, @@ -579,12 +835,35 @@ async def pre_get_feature( async def post_get_feature(self, response: feature.Feature) -> feature.Feature: """Post-rpc interceptor for get_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_get_feature` interceptor runs + before the `post_get_feature_with_metadata` interceptor. 
""" return response + async def post_get_feature_with_metadata( + self, + response: feature.Feature, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[feature.Feature, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_get_feature_with_metadata` + interceptor in new development instead of the `post_get_feature` interceptor. + When both interceptors are used, this `post_get_feature_with_metadata` interceptor runs after the + `post_get_feature` interceptor. The (possibly modified) response returned by + `post_get_feature` will be passed to + `post_get_feature_with_metadata`. + """ + return response, metadata + async def pre_get_featurestore( self, request: featurestore_service.GetFeaturestoreRequest, @@ -605,12 +884,35 @@ async def post_get_featurestore( ) -> featurestore.Featurestore: """Post-rpc interceptor for get_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_get_featurestore` interceptor runs + before the `post_get_featurestore_with_metadata` interceptor. 
""" return response + async def post_get_featurestore_with_metadata( + self, + response: featurestore.Featurestore, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[featurestore.Featurestore, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_get_featurestore_with_metadata` + interceptor in new development instead of the `post_get_featurestore` interceptor. + When both interceptors are used, this `post_get_featurestore_with_metadata` interceptor runs after the + `post_get_featurestore` interceptor. The (possibly modified) response returned by + `post_get_featurestore` will be passed to + `post_get_featurestore_with_metadata`. + """ + return response, metadata + async def pre_import_feature_values( self, request: featurestore_service.ImportFeatureValuesRequest, @@ -631,12 +933,35 @@ async def post_import_feature_values( ) -> operations_pb2.Operation: """Post-rpc interceptor for import_feature_values - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_import_feature_values_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_import_feature_values` interceptor runs + before the `post_import_feature_values_with_metadata` interceptor. 
""" return response + async def post_import_feature_values_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for import_feature_values + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_import_feature_values_with_metadata` + interceptor in new development instead of the `post_import_feature_values` interceptor. + When both interceptors are used, this `post_import_feature_values_with_metadata` interceptor runs after the + `post_import_feature_values` interceptor. The (possibly modified) response returned by + `post_import_feature_values` will be passed to + `post_import_feature_values_with_metadata`. + """ + return response, metadata + async def pre_list_entity_types( self, request: featurestore_service.ListEntityTypesRequest, @@ -657,12 +982,38 @@ async def post_list_entity_types( ) -> featurestore_service.ListEntityTypesResponse: """Post-rpc interceptor for list_entity_types - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_entity_types_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_list_entity_types` interceptor runs + before the `post_list_entity_types_with_metadata` interceptor. 
""" return response + async def post_list_entity_types_with_metadata( + self, + response: featurestore_service.ListEntityTypesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListEntityTypesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_entity_types + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_list_entity_types_with_metadata` + interceptor in new development instead of the `post_list_entity_types` interceptor. + When both interceptors are used, this `post_list_entity_types_with_metadata` interceptor runs after the + `post_list_entity_types` interceptor. The (possibly modified) response returned by + `post_list_entity_types` will be passed to + `post_list_entity_types_with_metadata`. + """ + return response, metadata + async def pre_list_features( self, request: featurestore_service.ListFeaturesRequest, @@ -683,12 +1034,38 @@ async def post_list_features( ) -> featurestore_service.ListFeaturesResponse: """Post-rpc interceptor for list_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_list_features` interceptor runs + before the `post_list_features_with_metadata` interceptor. 
""" return response + async def post_list_features_with_metadata( + self, + response: featurestore_service.ListFeaturesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListFeaturesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_list_features_with_metadata` + interceptor in new development instead of the `post_list_features` interceptor. + When both interceptors are used, this `post_list_features_with_metadata` interceptor runs after the + `post_list_features` interceptor. The (possibly modified) response returned by + `post_list_features` will be passed to + `post_list_features_with_metadata`. + """ + return response, metadata + async def pre_list_featurestores( self, request: featurestore_service.ListFeaturestoresRequest, @@ -709,12 +1086,38 @@ async def post_list_featurestores( ) -> featurestore_service.ListFeaturestoresResponse: """Post-rpc interceptor for list_featurestores - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_featurestores_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_list_featurestores` interceptor runs + before the `post_list_featurestores_with_metadata` interceptor. 
""" return response + async def post_list_featurestores_with_metadata( + self, + response: featurestore_service.ListFeaturestoresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.ListFeaturestoresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_featurestores + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_list_featurestores_with_metadata` + interceptor in new development instead of the `post_list_featurestores` interceptor. + When both interceptors are used, this `post_list_featurestores_with_metadata` interceptor runs after the + `post_list_featurestores` interceptor. The (possibly modified) response returned by + `post_list_featurestores` will be passed to + `post_list_featurestores_with_metadata`. + """ + return response, metadata + async def pre_search_features( self, request: featurestore_service.SearchFeaturesRequest, @@ -735,12 +1138,38 @@ async def post_search_features( ) -> featurestore_service.SearchFeaturesResponse: """Post-rpc interceptor for search_features - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_features_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_search_features` interceptor runs + before the `post_search_features_with_metadata` interceptor. 
""" return response + async def post_search_features_with_metadata( + self, + response: featurestore_service.SearchFeaturesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + featurestore_service.SearchFeaturesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_features + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_search_features_with_metadata` + interceptor in new development instead of the `post_search_features` interceptor. + When both interceptors are used, this `post_search_features_with_metadata` interceptor runs after the + `post_search_features` interceptor. The (possibly modified) response returned by + `post_search_features` will be passed to + `post_search_features_with_metadata`. + """ + return response, metadata + async def pre_update_entity_type( self, request: featurestore_service.UpdateEntityTypeRequest, @@ -761,12 +1190,35 @@ async def post_update_entity_type( ) -> gca_entity_type.EntityType: """Post-rpc interceptor for update_entity_type - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_entity_type_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_update_entity_type` interceptor runs + before the `post_update_entity_type_with_metadata` interceptor. 
""" return response + async def post_update_entity_type_with_metadata( + self, + response: gca_entity_type.EntityType, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_entity_type.EntityType, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_entity_type + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_update_entity_type_with_metadata` + interceptor in new development instead of the `post_update_entity_type` interceptor. + When both interceptors are used, this `post_update_entity_type_with_metadata` interceptor runs after the + `post_update_entity_type` interceptor. The (possibly modified) response returned by + `post_update_entity_type` will be passed to + `post_update_entity_type_with_metadata`. + """ + return response, metadata + async def pre_update_feature( self, request: featurestore_service.UpdateFeatureRequest, @@ -787,12 +1239,35 @@ async def post_update_feature( ) -> gca_feature.Feature: """Post-rpc interceptor for update_feature - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_feature_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_update_feature` interceptor runs + before the `post_update_feature_with_metadata` interceptor. 
""" return response + async def post_update_feature_with_metadata( + self, + response: gca_feature.Feature, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_feature.Feature, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_feature + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_update_feature_with_metadata` + interceptor in new development instead of the `post_update_feature` interceptor. + When both interceptors are used, this `post_update_feature_with_metadata` interceptor runs after the + `post_update_feature` interceptor. The (possibly modified) response returned by + `post_update_feature` will be passed to + `post_update_feature_with_metadata`. + """ + return response, metadata + async def pre_update_featurestore( self, request: featurestore_service.UpdateFeaturestoreRequest, @@ -813,12 +1288,35 @@ async def post_update_featurestore( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_featurestore - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_featurestore_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the FeaturestoreService server but before - it is returned to user code. + it is returned to user code. This `post_update_featurestore` interceptor runs + before the `post_update_featurestore_with_metadata` interceptor. 
""" return response + async def post_update_featurestore_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_featurestore + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the FeaturestoreService server but before it is returned to user code. + + We recommend only using this `post_update_featurestore_with_metadata` + interceptor in new development instead of the `post_update_featurestore` interceptor. + When both interceptors are used, this `post_update_featurestore_with_metadata` interceptor runs after the + `post_update_featurestore` interceptor. The (possibly modified) response returned by + `post_update_featurestore` will be passed to + `post_update_featurestore_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1128,9 +1626,9 @@ def __init__( self._interceptor = interceptor or AsyncFeaturestoreServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1388,7 +1886,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1435,6 +1933,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_create_features(resp) + 
response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_batch_create_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1548,7 +2050,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1595,6 +2097,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_read_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_batch_read_feature_values_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1708,7 +2216,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1755,6 +2263,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1870,7 +2382,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1917,6 +2429,10 @@ async def __call__( 
content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2030,7 +2546,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2077,6 +2593,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2185,7 +2705,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2231,6 +2751,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2341,7 +2865,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = 
type(request).to_json(request) except: request_payload = None http_request = { @@ -2387,6 +2911,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2495,7 +3023,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2541,6 +3069,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2654,7 +3186,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2701,6 +3233,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2814,7 +3350,7 @@ async def 
__call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2861,6 +3397,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_export_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_export_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3018,6 +3558,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3176,6 +3720,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3332,6 +3880,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_featurestore_with_metadata( + resp, response_metadata + ) if 
CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3445,7 +3997,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3492,6 +4044,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_import_feature_values(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_import_feature_values_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3645,6 +4201,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_entity_types(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_entity_types_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3806,6 +4366,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3961,6 +4525,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_featurestores(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] 
+ resp, _ = await self._interceptor.post_list_featurestores_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4116,6 +4684,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_search_features(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_search_features_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4281,6 +4853,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_entity_type(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_entity_type_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4443,6 +5019,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_feature(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_feature_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4556,7 +5136,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4603,6 +5183,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_update_featurestore(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_featurestore_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4798,6 +5382,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -4934,6 +5522,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -4942,6 +5534,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4982,6 +5582,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ 
{ @@ -5144,6 +5748,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -5172,6 +5780,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -5284,6 +5896,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -5292,6 +5908,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5352,6 +5976,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], 
"google.longrunning.Operations.GetOperation": [ { @@ -5514,6 +6142,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -5554,6 +6186,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -5666,6 +6302,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -5674,6 +6314,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5734,6 +6382,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { 
@@ -5892,6 +6544,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -5932,6 +6588,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -6044,6 +6704,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -6064,6 +6732,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -6112,6 +6784,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -6278,6 +6954,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -6310,6 +6990,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -6418,6 +7102,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -6426,6 +7114,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -6486,6 +7182,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -6712,7 +7412,6 @@ async def __call__( timeout: 
Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -6865,7 +7564,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -7016,7 +7714,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -7170,7 +7867,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -7328,7 +8024,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -7479,7 +8174,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -7602,7 +8296,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -7725,7 +8418,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -7878,7 +8570,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. 
Args: @@ -8029,7 +8720,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py index 1d30318877..4228da1729 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1322,6 +1322,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -1408,6 +1412,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1486,6 +1495,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1667,6 +1680,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1803,6 +1820,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1811,6 +1832,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1851,6 +1880,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2032,6 +2065,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2060,6 +2097,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2172,6 +2213,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2180,6 +2225,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2240,6 +2293,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2421,6 +2478,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2461,6 +2522,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2573,6 +2638,10 @@ def _get_http_options(): "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2581,6 +2650,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2641,6 +2718,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2818,6 +2899,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2858,6 +2943,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2970,6 +3059,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2990,6 +3087,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3038,6 +3139,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3223,6 +3328,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3255,6 +3364,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3363,6 +3476,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3371,6 +3488,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", 
}, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3431,6 +3556,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/__init__.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/__init__.py new file mode 100644 index 0000000000..e99930ebf4 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import GenAiCacheServiceClient +from .async_client import GenAiCacheServiceAsyncClient + +__all__ = ( + "GenAiCacheServiceClient", + "GenAiCacheServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py new file mode 100644 index 0000000000..19bebec217 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/async_client.py @@ -0,0 +1,1624 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import logging as std_logging +from collections import OrderedDict +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.services.gen_ai_cache_service import pagers +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import content +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +from google.cloud.aiplatform_v1.types import tool +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from .transports.base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import GenAiCacheServiceGrpcAsyncIOTransport +from .client import GenAiCacheServiceClient + +try: + from 
google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class GenAiCacheServiceAsyncClient: + """Service for managing Vertex AI's CachedContent resource.""" + + _client: GenAiCacheServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = GenAiCacheServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = GenAiCacheServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = GenAiCacheServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = GenAiCacheServiceClient._DEFAULT_UNIVERSE + + cached_content_path = staticmethod(GenAiCacheServiceClient.cached_content_path) + parse_cached_content_path = staticmethod( + GenAiCacheServiceClient.parse_cached_content_path + ) + rag_corpus_path = staticmethod(GenAiCacheServiceClient.rag_corpus_path) + parse_rag_corpus_path = staticmethod(GenAiCacheServiceClient.parse_rag_corpus_path) + common_billing_account_path = staticmethod( + GenAiCacheServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + GenAiCacheServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(GenAiCacheServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + GenAiCacheServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + GenAiCacheServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + GenAiCacheServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(GenAiCacheServiceClient.common_project_path) + parse_common_project_path = staticmethod( + GenAiCacheServiceClient.parse_common_project_path + ) + common_location_path = 
staticmethod(GenAiCacheServiceClient.common_location_path) + parse_common_location_path = staticmethod( + GenAiCacheServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiCacheServiceAsyncClient: The constructed client. + """ + sa_info_func = ( + GenAiCacheServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(GenAiCacheServiceAsyncClient, info, *args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiCacheServiceAsyncClient: The constructed client. + """ + sa_file_func = ( + GenAiCacheServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(GenAiCacheServiceAsyncClient, filename, *args, **kwargs) + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+ + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return GenAiCacheServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> GenAiCacheServiceTransport: + """Returns the transport used by the client instance. + + Returns: + GenAiCacheServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = GenAiCacheServiceClient.get_transport_class + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + GenAiCacheServiceTransport, + Callable[..., GenAiCacheServiceTransport], + ] + ] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the gen ai cache service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,GenAiCacheServiceTransport,Callable[..., GenAiCacheServiceTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the GenAiCacheServiceTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = GenAiCacheServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient`.", + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "credentialsType": None, + } + ), + ) + + async def create_cached_content( + self, + request: Optional[ + 
Union[gen_ai_cache_service.CreateCachedContentRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + cached_content: Optional[gca_cached_content.CachedContent] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Creates cached content, this call will initialize the + cached content in the data storage, and users need to + pay for the cache data storage. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateCachedContentRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_cached_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreateCachedContentRequest, dict]]): + The request object. Request message for + [GenAiCacheService.CreateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.CreateCachedContent]. + parent (:class:`str`): + Required. The parent resource where + the cached content will be created + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cached_content (:class:`google.cloud.aiplatform_v1.types.CachedContent`): + Required. 
The cached content to + create + + This corresponds to the ``cached_content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, cached_content] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.CreateCachedContentRequest): + request = gen_ai_cache_service.CreateCachedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if cached_content is not None: + request.cached_content = cached_content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_cached_content + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.GetCachedContentRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> cached_content.CachedContent: + r"""Gets cached content configurations + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCachedContentRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cached_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetCachedContentRequest, dict]]): + The request object. 
Request message for + [GenAiCacheService.GetCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.GetCachedContent]. + name (:class:`str`): + Required. The resource name referring + to the cached content + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.GetCachedContentRequest): + request = gen_ai_cache_service.GetCachedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_cached_content + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.UpdateCachedContentRequest, dict] + ] = None, + *, + cached_content: Optional[gca_cached_content.CachedContent] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Updates cached content configurations + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_update_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateCachedContentRequest( + ) + + # Make the request + response = await client.update_cached_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.UpdateCachedContentRequest, dict]]): + The request object. Request message for + [GenAiCacheService.UpdateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.UpdateCachedContent]. + Only expire_time or ttl can be updated. + cached_content (:class:`google.cloud.aiplatform_v1.types.CachedContent`): + Required. The cached content to + update + + This corresponds to the ``cached_content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. 
+ + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [cached_content, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.UpdateCachedContentRequest): + request = gen_ai_cache_service.UpdateCachedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cached_content is not None: + request.cached_content = cached_content + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_cached_content + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("cached_content.name", request.cached_content.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.DeleteCachedContentRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes cached content + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCachedContentRequest( + name="name_value", + ) + + # Make the request + await client.delete_cached_content(request=request) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeleteCachedContentRequest, dict]]): + The request object. Request message for + [GenAiCacheService.DeleteCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.DeleteCachedContent]. + name (:class:`str`): + Required. The resource name referring + to the cached content + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.DeleteCachedContentRequest): + request = gen_ai_cache_service.DeleteCachedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_cached_content + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_cached_contents( + self, + request: Optional[ + Union[gen_ai_cache_service.ListCachedContentsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListCachedContentsAsyncPager: + r"""Lists cached contents in a project + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_cached_contents(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCachedContentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_cached_contents(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListCachedContentsRequest, dict]]): + The request object. Request to list CachedContents. + parent (:class:`str`): + Required. The parent, which owns this + collection of cached contents. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.services.gen_ai_cache_service.pagers.ListCachedContentsAsyncPager: + Response with a list of + CachedContents. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.ListCachedContentsRequest): + request = gen_ai_cache_service.ListCachedContentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_cached_contents + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListCachedContentsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.wait_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self.transport._wrapped_methods[self._client._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[ + self._client._transport.test_iam_permissions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self.transport._wrapped_methods[self._client._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "GenAiCacheServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +__all__ = ("GenAiCacheServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py new file mode 100644 index 0000000000..8deb96e615 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/client.py @@ -0,0 +1,2137 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging +import os +import re +from typing import ( + Dict, + Callable, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + +from google.cloud.aiplatform_v1.services.gen_ai_cache_service import pagers +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import content +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +from google.cloud.aiplatform_v1.types import tool +from google.cloud.location import locations_pb2 # type: 
ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +from .transports.base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import GenAiCacheServiceGrpcTransport +from .transports.grpc_asyncio import GenAiCacheServiceGrpcAsyncIOTransport +from .transports.rest import GenAiCacheServiceRestTransport + +try: + from .transports.rest_asyncio import AsyncGenAiCacheServiceRestTransport + + HAS_ASYNC_REST_DEPENDENCIES = True +except ImportError as e: # pragma: NO COVER + HAS_ASYNC_REST_DEPENDENCIES = False + ASYNC_REST_EXCEPTION = e + + +class GenAiCacheServiceClientMeta(type): + """Metaclass for the GenAiCacheService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[GenAiCacheServiceTransport]] + _transport_registry["grpc"] = GenAiCacheServiceGrpcTransport + _transport_registry["grpc_asyncio"] = GenAiCacheServiceGrpcAsyncIOTransport + _transport_registry["rest"] = GenAiCacheServiceRestTransport + if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER + _transport_registry["rest_asyncio"] = AsyncGenAiCacheServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[GenAiCacheServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if ( + label == "rest_asyncio" and not HAS_ASYNC_REST_DEPENDENCIES + ): # pragma: NO COVER + raise ASYNC_REST_EXCEPTION + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class GenAiCacheServiceClient(metaclass=GenAiCacheServiceClientMeta): + """Service for managing Vertex AI's CachedContent resource.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. 
+ + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiCacheServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiCacheServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GenAiCacheServiceTransport: + """Returns the transport used by the client instance. + + Returns: + GenAiCacheServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def cached_content_path( + project: str, + location: str, + cached_content: str, + ) -> str: + """Returns a fully-qualified cached_content string.""" + return "projects/{project}/locations/{location}/cachedContents/{cached_content}".format( + project=project, + location=location, + cached_content=cached_content, + ) + + @staticmethod + def parse_cached_content_path(path: str) -> Dict[str, str]: + """Parses a cached_content path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/cachedContents/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def rag_corpus_path( + project: str, + location: str, + rag_corpus: str, + ) -> str: + """Returns a fully-qualified rag_corpus string.""" + return "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format( + project=project, + location=location, + rag_corpus=rag_corpus, + ) + + @staticmethod + def parse_rag_corpus_path(path: str) -> Dict[str, str]: + """Parses a rag_corpus path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/ragCorpora/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account 
path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + 
"""Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = GenAiCacheServiceClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. 
+ client_cert_source = None + if use_client_cert: + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = GenAiCacheServiceClient._use_client_cert_effective() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert, use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. 
+ """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = GenAiCacheServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = GenAiCacheServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = GenAiCacheServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. 
+ + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = GenAiCacheServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True + + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. 
+ + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + GenAiCacheServiceTransport, + Callable[..., GenAiCacheServiceTransport], + ] + ] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the gen ai cache service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,GenAiCacheServiceTransport,Callable[..., GenAiCacheServiceTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the GenAiCacheServiceTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + GenAiCacheServiceClient._read_environment_variables() + ) + self._client_cert_source = GenAiCacheServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = GenAiCacheServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. 
+ self._is_universe_domain_valid = False + + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, GenAiCacheServiceTransport) + if transport_provided: + # transport is a GenAiCacheServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(GenAiCacheServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or GenAiCacheServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + transport_init: Union[ + Type[GenAiCacheServiceTransport], + Callable[..., GenAiCacheServiceTransport], + ] = ( + GenAiCacheServiceClient.get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., GenAiCacheServiceTransport], transport) + ) + + if "rest_asyncio" in str(transport_init): + unsupported_params = { + "google.api_core.client_options.ClientOptions.credentials_file": self._client_options.credentials_file, + "google.api_core.client_options.ClientOptions.scopes": self._client_options.scopes, + "google.api_core.client_options.ClientOptions.quota_project_id": 
self._client_options.quota_project_id, + "google.api_core.client_options.ClientOptions.client_cert_source": self._client_options.client_cert_source, + "google.api_core.client_options.ClientOptions.api_audience": self._client_options.api_audience, + } + provided_unsupported_params = [ + name + for name, value in unsupported_params.items() + if value is not None + ] + if provided_unsupported_params: + raise core_exceptions.AsyncRestUnsupportedParameterError( # type: ignore + f"The following provided parameters are not supported for `transport=rest_asyncio`: {', '.join(provided_unsupported_params)}" + ) + self._transport = transport_init( + credentials=credentials, + host=self._api_endpoint, + client_info=client_info, + ) + return + + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + # initialize with the provided callable or the passed in class + self._transport = transport_init( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.cloud.aiplatform_v1.GenAiCacheServiceClient`.", + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + 
self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "credentialsType": None, + } + ), + ) + + def create_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.CreateCachedContentRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + cached_content: Optional[gca_cached_content.CachedContent] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Creates cached content, this call will initialize the + cached content in the data storage, and users need to + pay for the cache data storage. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreateCachedContentRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_cached_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateCachedContentRequest, dict]): + The request object. Request message for + [GenAiCacheService.CreateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.CreateCachedContent]. + parent (str): + Required. 
The parent resource where + the cached content will be created + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cached_content (google.cloud.aiplatform_v1.types.CachedContent): + Required. The cached content to + create + + This corresponds to the ``cached_content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, cached_content] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.CreateCachedContentRequest): + request = gen_ai_cache_service.CreateCachedContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if cached_content is not None: + request.cached_content = cached_content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cached_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.GetCachedContentRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> cached_content.CachedContent: + r"""Gets cached content configurations + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetCachedContentRequest( + name="name_value", + ) + + # Make the request + response = client.get_cached_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetCachedContentRequest, dict]): + The request object. Request message for + [GenAiCacheService.GetCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.GetCachedContent]. + name (str): + Required. The resource name referring + to the cached content + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.GetCachedContentRequest): + request = gen_ai_cache_service.GetCachedContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cached_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.UpdateCachedContentRequest, dict] + ] = None, + *, + cached_content: Optional[gca_cached_content.CachedContent] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Updates cached content configurations + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_update_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdateCachedContentRequest( + ) + + # Make the request + response = client.update_cached_content(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdateCachedContentRequest, dict]): + The request object. Request message for + [GenAiCacheService.UpdateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.UpdateCachedContent]. + Only expire_time or ttl can be updated. + cached_content (google.cloud.aiplatform_v1.types.CachedContent): + Required. The cached content to + update + + This corresponds to the ``cached_content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.aiplatform_v1.types.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [cached_content, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.UpdateCachedContentRequest): + request = gen_ai_cache_service.UpdateCachedContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cached_content is not None: + request.cached_content = cached_content + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cached_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("cached_content.name", request.cached_content.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_cached_content( + self, + request: Optional[ + Union[gen_ai_cache_service.DeleteCachedContentRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes cached content + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_cached_content(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteCachedContentRequest( + name="name_value", + ) + + # Make the request + client.delete_cached_content(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteCachedContentRequest, dict]): + The request object. Request message for + [GenAiCacheService.DeleteCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.DeleteCachedContent]. + name (str): + Required. The resource name referring + to the cached content + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.DeleteCachedContentRequest): + request = gen_ai_cache_service.DeleteCachedContentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cached_content] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_cached_contents( + self, + request: Optional[ + Union[gen_ai_cache_service.ListCachedContentsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListCachedContentsPager: + r"""Lists cached contents in a project + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_cached_contents(): + # Create a client + client = aiplatform_v1.GenAiCacheServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListCachedContentsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_cached_contents(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListCachedContentsRequest, dict]): + The request object. Request to list CachedContents. + parent (str): + Required. The parent, which owns this + collection of cached contents. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.services.gen_ai_cache_service.pagers.ListCachedContentsPager: + Response with a list of + CachedContents. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, gen_ai_cache_service.ListCachedContentsRequest): + request = gen_ai_cache_service.ListCachedContentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_cached_contents] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListCachedContentsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "GenAiCacheServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.wait_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. 
+ """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_location] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + +__all__ = ("GenAiCacheServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/pagers.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/pagers.py new file mode 100644 index 0000000000..083a0fa3b9 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/pagers.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, + Union, +) + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service + + +class ListCachedContentsPager: + """A pager for iterating through ``list_cached_contents`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListCachedContentsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``cached_contents`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListCachedContents`` requests and continue to iterate + through the ``cached_contents`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListCachedContentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., gen_ai_cache_service.ListCachedContentsResponse], + request: gen_ai_cache_service.ListCachedContentsRequest, + response: gen_ai_cache_service.ListCachedContentsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListCachedContentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListCachedContentsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = gen_ai_cache_service.ListCachedContentsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[gen_ai_cache_service.ListCachedContentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[cached_content.CachedContent]: + for page in self.pages: + yield from page.cached_contents + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListCachedContentsAsyncPager: + """A pager for iterating through ``list_cached_contents`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListCachedContentsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``cached_contents`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListCachedContents`` requests and continue to iterate + through the ``cached_contents`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListCachedContentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[gen_ai_cache_service.ListCachedContentsResponse] + ], + request: gen_ai_cache_service.ListCachedContentsRequest, + response: gen_ai_cache_service.ListCachedContentsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListCachedContentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListCachedContentsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = gen_ai_cache_service.ListCachedContentsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[gen_ai_cache_service.ListCachedContentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[cached_content.CachedContent]: + async def async_generator(): + async for page in self.pages: + for response in page.cached_contents: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/README.rst b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/README.rst new file mode 100644 index 0000000000..22c94081d3 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`GenAiCacheServiceTransport` is the ABC for all transports. +- public child `GenAiCacheServiceGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `GenAiCacheServiceGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseGenAiCacheServiceRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). 
+- public child `GenAiCacheServiceRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/__init__.py new file mode 100644 index 0000000000..13ea33cff7 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type, Tuple + +from .base import GenAiCacheServiceTransport +from .grpc import GenAiCacheServiceGrpcTransport +from .grpc_asyncio import GenAiCacheServiceGrpcAsyncIOTransport +from .rest import GenAiCacheServiceRestTransport +from .rest import GenAiCacheServiceRestInterceptor + +ASYNC_REST_CLASSES: Tuple[str, ...] +try: + from .rest_asyncio import AsyncGenAiCacheServiceRestTransport + from .rest_asyncio import AsyncGenAiCacheServiceRestInterceptor + + ASYNC_REST_CLASSES = ( + "AsyncGenAiCacheServiceRestTransport", + "AsyncGenAiCacheServiceRestInterceptor", + ) + HAS_REST_ASYNC = True +except ImportError: # pragma: NO COVER + ASYNC_REST_CLASSES = () + HAS_REST_ASYNC = False + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[GenAiCacheServiceTransport]] +_transport_registry["grpc"] = GenAiCacheServiceGrpcTransport +_transport_registry["grpc_asyncio"] = GenAiCacheServiceGrpcAsyncIOTransport +_transport_registry["rest"] = GenAiCacheServiceRestTransport +if HAS_REST_ASYNC: # pragma: NO COVER + _transport_registry["rest_asyncio"] = AsyncGenAiCacheServiceRestTransport + +__all__ = ( + "GenAiCacheServiceTransport", + "GenAiCacheServiceGrpcTransport", + "GenAiCacheServiceGrpcAsyncIOTransport", + "GenAiCacheServiceRestTransport", + "GenAiCacheServiceRestInterceptor", +) + ASYNC_REST_CLASSES diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py new file mode 100644 index 0000000000..5068713a23 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/base.py @@ -0,0 +1,391 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore +import google.protobuf + +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class GenAiCacheServiceTransport(abc.ABC): + """Abstract transport class for GenAiCacheService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + # Save the scopes. + self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, + ) + elif credentials is None and not self._ignore_credentials: + credentials, _ = google.auth.default( + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_cached_content: gapic_v1.method.wrap_method( + self.create_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.get_cached_content: gapic_v1.method.wrap_method( + self.get_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.update_cached_content: gapic_v1.method.wrap_method( + self.update_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.delete_cached_content: gapic_v1.method.wrap_method( + self.delete_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.list_cached_contents: gapic_v1.method.wrap_method( + self.list_cached_contents, + default_timeout=None, + client_info=client_info, + ), + self.get_location: gapic_v1.method.wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: gapic_v1.method.wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: gapic_v1.method.wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: gapic_v1.method.wrap_method( + 
self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def create_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.CreateCachedContentRequest], + Union[ + gca_cached_content.CachedContent, + Awaitable[gca_cached_content.CachedContent], + ], + ]: + raise NotImplementedError() + + @property + def get_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.GetCachedContentRequest], + Union[cached_content.CachedContent, Awaitable[cached_content.CachedContent]], + ]: + raise NotImplementedError() + + @property + def update_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.UpdateCachedContentRequest], + Union[ + gca_cached_content.CachedContent, + Awaitable[gca_cached_content.CachedContent], + ], + ]: + raise NotImplementedError() + + @property + def delete_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.DeleteCachedContentRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def list_cached_contents( + self, + ) -> Callable[ + [gen_ai_cache_service.ListCachedContentsRequest], + Union[ + gen_ai_cache_service.ListCachedContentsResponse, + Awaitable[gen_ai_cache_service.ListCachedContentsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise 
NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("GenAiCacheServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py new file mode 100644 index 0000000000..6b25fa58d3 --- /dev/null +++ 
b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc.py @@ -0,0 +1,686 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json +import logging as std_logging +import pickle +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message + +import grpc # type: ignore +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +from .base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + 
CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for 
{client_call_details.method}.", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class GenAiCacheServiceGrpcTransport(GenAiCacheServiceTransport): + """gRPC backend transport for GenAiCacheService. + + Service for managing Vertex AI's CachedContent resource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, grpc.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. 
This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def create_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.CreateCachedContentRequest], + gca_cached_content.CachedContent, + ]: + r"""Return a callable for the create cached content method over gRPC. + + Creates cached content, this call will initialize the + cached content in the data storage, and users need to + pay for the cache data storage. + + Returns: + Callable[[~.CreateCachedContentRequest], + ~.CachedContent]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cached_content" not in self._stubs: + self._stubs["create_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/CreateCachedContent", + request_serializer=gen_ai_cache_service.CreateCachedContentRequest.serialize, + response_deserializer=gca_cached_content.CachedContent.deserialize, + ) + return self._stubs["create_cached_content"] + + @property + def get_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.GetCachedContentRequest], cached_content.CachedContent + ]: + r"""Return a callable for the get cached content method over gRPC. + + Gets cached content configurations + + Returns: + Callable[[~.GetCachedContentRequest], + ~.CachedContent]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cached_content" not in self._stubs: + self._stubs["get_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/GetCachedContent", + request_serializer=gen_ai_cache_service.GetCachedContentRequest.serialize, + response_deserializer=cached_content.CachedContent.deserialize, + ) + return self._stubs["get_cached_content"] + + @property + def update_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.UpdateCachedContentRequest], + gca_cached_content.CachedContent, + ]: + r"""Return a callable for the update cached content method over gRPC. + + Updates cached content configurations + + Returns: + Callable[[~.UpdateCachedContentRequest], + ~.CachedContent]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cached_content" not in self._stubs: + self._stubs["update_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/UpdateCachedContent", + request_serializer=gen_ai_cache_service.UpdateCachedContentRequest.serialize, + response_deserializer=gca_cached_content.CachedContent.deserialize, + ) + return self._stubs["update_cached_content"] + + @property + def delete_cached_content( + self, + ) -> Callable[[gen_ai_cache_service.DeleteCachedContentRequest], empty_pb2.Empty]: + r"""Return a callable for the delete cached content method over gRPC. + + Deletes cached content + + Returns: + Callable[[~.DeleteCachedContentRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cached_content" not in self._stubs: + self._stubs["delete_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/DeleteCachedContent", + request_serializer=gen_ai_cache_service.DeleteCachedContentRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_cached_content"] + + @property + def list_cached_contents( + self, + ) -> Callable[ + [gen_ai_cache_service.ListCachedContentsRequest], + gen_ai_cache_service.ListCachedContentsResponse, + ]: + r"""Return a callable for the list cached contents method over gRPC. + + Lists cached contents in a project + + Returns: + Callable[[~.ListCachedContentsRequest], + ~.ListCachedContentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_cached_contents" not in self._stubs: + self._stubs["list_cached_contents"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/ListCachedContents", + request_serializer=gen_ai_cache_service.ListCachedContentsRequest.serialize, + response_deserializer=gen_ai_cache_service.ListCachedContentsResponse.deserialize, + ) + return self._stubs["list_cached_contents"] + + def close(self): + self._logged_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("GenAiCacheServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..bc940698ae --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/grpc_asyncio.py @@ -0,0 +1,782 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import inspect +import json +import pickle +import logging as std_logging +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message + +import grpc # type: ignore +import proto # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +from .base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import GenAiCacheServiceGrpcTransport + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + 
request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC trailing metadata to a dict of strings for logging + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + + +class GenAiCacheServiceGrpcAsyncIOTransport(GenAiCacheServiceTransport): + """gRPC AsyncIO backend transport for GenAiCacheService. + + Service for managing Vertex AI's CachedContent resource. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if a ``channel`` instance is provided. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. 
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if a ``channel`` instance is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if isinstance(channel, aio.Channel): + # Ignore credentials if a channel was passed. + credentials = None + self._ignore_credentials = True + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the 
credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) + # Wrap messages. This must be done after self._logged_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.CreateCachedContentRequest], + Awaitable[gca_cached_content.CachedContent], + ]: + r"""Return a callable for the create cached content method over gRPC. + + Creates cached content, this call will initialize the + cached content in the data storage, and users need to + pay for the cache data storage. + + Returns: + Callable[[~.CreateCachedContentRequest], + Awaitable[~.CachedContent]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_cached_content" not in self._stubs: + self._stubs["create_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/CreateCachedContent", + request_serializer=gen_ai_cache_service.CreateCachedContentRequest.serialize, + response_deserializer=gca_cached_content.CachedContent.deserialize, + ) + return self._stubs["create_cached_content"] + + @property + def get_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.GetCachedContentRequest], + Awaitable[cached_content.CachedContent], + ]: + r"""Return a callable for the get cached content method over gRPC. + + Gets cached content configurations + + Returns: + Callable[[~.GetCachedContentRequest], + Awaitable[~.CachedContent]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cached_content" not in self._stubs: + self._stubs["get_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/GetCachedContent", + request_serializer=gen_ai_cache_service.GetCachedContentRequest.serialize, + response_deserializer=cached_content.CachedContent.deserialize, + ) + return self._stubs["get_cached_content"] + + @property + def update_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.UpdateCachedContentRequest], + Awaitable[gca_cached_content.CachedContent], + ]: + r"""Return a callable for the update cached content method over gRPC. + + Updates cached content configurations + + Returns: + Callable[[~.UpdateCachedContentRequest], + Awaitable[~.CachedContent]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cached_content" not in self._stubs: + self._stubs["update_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/UpdateCachedContent", + request_serializer=gen_ai_cache_service.UpdateCachedContentRequest.serialize, + response_deserializer=gca_cached_content.CachedContent.deserialize, + ) + return self._stubs["update_cached_content"] + + @property + def delete_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.DeleteCachedContentRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete cached content method over gRPC. + + Deletes cached content + + Returns: + Callable[[~.DeleteCachedContentRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cached_content" not in self._stubs: + self._stubs["delete_cached_content"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/DeleteCachedContent", + request_serializer=gen_ai_cache_service.DeleteCachedContentRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_cached_content"] + + @property + def list_cached_contents( + self, + ) -> Callable[ + [gen_ai_cache_service.ListCachedContentsRequest], + Awaitable[gen_ai_cache_service.ListCachedContentsResponse], + ]: + r"""Return a callable for the list cached contents method over gRPC. + + Lists cached contents in a project + + Returns: + Callable[[~.ListCachedContentsRequest], + Awaitable[~.ListCachedContentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_cached_contents" not in self._stubs: + self._stubs["list_cached_contents"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiCacheService/ListCachedContents", + request_serializer=gen_ai_cache_service.ListCachedContentsRequest.serialize, + response_deserializer=gen_ai_cache_service.ListCachedContentsResponse.deserialize, + ) + return self._stubs["list_cached_contents"] + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_cached_content: self._wrap_method( + self.create_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.get_cached_content: self._wrap_method( + self.get_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.update_cached_content: self._wrap_method( + self.update_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.delete_cached_content: self._wrap_method( + self.delete_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.list_cached_contents: self._wrap_method( + self.list_cached_contents, + default_timeout=None, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + 
self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: self._wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + def close(self): + return self._logged_channel.close() + + @property + def kind(self) -> str: + return "grpc_asyncio" + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self._logged_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self._logged_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("GenAiCacheServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py new file mode 100644 index 0000000000..99a1195f05 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest.py @@ -0,0 +1,2854 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import logging +import json # type: ignore + +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import gapic_v1 +import google.protobuf + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore + +from requests import __version__ as requests_version +import dataclasses +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + + +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +from .rest_base import _BaseGenAiCacheServiceRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"requests@{requests_version}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # 
pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class GenAiCacheServiceRestInterceptor: + """Interceptor for GenAiCacheService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the GenAiCacheServiceRestTransport. + + .. code-block:: python + class MyCustomGenAiCacheServiceInterceptor(GenAiCacheServiceRestInterceptor): + def pre_create_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_cached_content(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_cached_content(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_cached_contents(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_cached_contents(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_cached_content(self, response): + logging.log(f"Received response: {response}") + return response + + transport = GenAiCacheServiceRestTransport(interceptor=MyCustomGenAiCacheServiceInterceptor()) + client = GenAiCacheServiceClient(transport=transport) + + + """ + + def 
pre_create_cached_content( + self, + request: gen_ai_cache_service.CreateCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.CreateCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_create_cached_content( + self, response: gca_cached_content.CachedContent + ) -> gca_cached_content.CachedContent: + """Post-rpc interceptor for create_cached_content + + DEPRECATED. Please use the `post_create_cached_content_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. This `post_create_cached_content` interceptor runs + before the `post_create_cached_content_with_metadata` interceptor. + """ + return response + + def post_create_cached_content_with_metadata( + self, + response: gca_cached_content.CachedContent, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_cached_content.CachedContent, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_cached_content + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_create_cached_content_with_metadata` + interceptor in new development instead of the `post_create_cached_content` interceptor. + When both interceptors are used, this `post_create_cached_content_with_metadata` interceptor runs after the + `post_create_cached_content` interceptor. The (possibly modified) response returned by + `post_create_cached_content` will be passed to + `post_create_cached_content_with_metadata`. 
+ """ + return response, metadata + + def pre_delete_cached_content( + self, + request: gen_ai_cache_service.DeleteCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.DeleteCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def pre_get_cached_content( + self, + request: gen_ai_cache_service.GetCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.GetCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_get_cached_content( + self, response: cached_content.CachedContent + ) -> cached_content.CachedContent: + """Post-rpc interceptor for get_cached_content + + DEPRECATED. Please use the `post_get_cached_content_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. This `post_get_cached_content` interceptor runs + before the `post_get_cached_content_with_metadata` interceptor. + """ + return response + + def post_get_cached_content_with_metadata( + self, + response: cached_content.CachedContent, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[cached_content.CachedContent, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_cached_content + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. 
+ + We recommend only using this `post_get_cached_content_with_metadata` + interceptor in new development instead of the `post_get_cached_content` interceptor. + When both interceptors are used, this `post_get_cached_content_with_metadata` interceptor runs after the + `post_get_cached_content` interceptor. The (possibly modified) response returned by + `post_get_cached_content` will be passed to + `post_get_cached_content_with_metadata`. + """ + return response, metadata + + def pre_list_cached_contents( + self, + request: gen_ai_cache_service.ListCachedContentsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.ListCachedContentsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_cached_contents + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_list_cached_contents( + self, response: gen_ai_cache_service.ListCachedContentsResponse + ) -> gen_ai_cache_service.ListCachedContentsResponse: + """Post-rpc interceptor for list_cached_contents + + DEPRECATED. Please use the `post_list_cached_contents_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. This `post_list_cached_contents` interceptor runs + before the `post_list_cached_contents_with_metadata` interceptor. 
+ """ + return response + + def post_list_cached_contents_with_metadata( + self, + response: gen_ai_cache_service.ListCachedContentsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.ListCachedContentsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_cached_contents + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_list_cached_contents_with_metadata` + interceptor in new development instead of the `post_list_cached_contents` interceptor. + When both interceptors are used, this `post_list_cached_contents_with_metadata` interceptor runs after the + `post_list_cached_contents` interceptor. The (possibly modified) response returned by + `post_list_cached_contents` will be passed to + `post_list_cached_contents_with_metadata`. + """ + return response, metadata + + def pre_update_cached_content( + self, + request: gen_ai_cache_service.UpdateCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.UpdateCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_update_cached_content( + self, response: gca_cached_content.CachedContent + ) -> gca_cached_content.CachedContent: + """Post-rpc interceptor for update_cached_content + + DEPRECATED. Please use the `post_update_cached_content_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
This `post_update_cached_content` interceptor runs + before the `post_update_cached_content_with_metadata` interceptor. + """ + return response + + def post_update_cached_content_with_metadata( + self, + response: gca_cached_content.CachedContent, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_cached_content.CachedContent, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_cached_content + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_update_cached_content_with_metadata` + interceptor in new development instead of the `post_update_cached_content` interceptor. + When both interceptors are used, this `post_update_cached_content_with_metadata` interceptor runs after the + `post_update_cached_content` interceptor. The (possibly modified) response returned by + `post_update_cached_content` will be passed to + `post_update_cached_content_with_metadata`. + """ + return response, metadata + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. 
+ """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
+ """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. 
+ """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class GenAiCacheServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: GenAiCacheServiceRestInterceptor + + +class GenAiCacheServiceRestTransport(_BaseGenAiCacheServiceRestTransport): + """REST backend synchronous transport for GenAiCacheService. + + Service for managing Vertex AI's CachedContent resource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[GenAiCacheServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or GenAiCacheServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _CreateCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.CreateCachedContent") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: gen_ai_cache_service.CreateCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, 
Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Call the create cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.CreateCachedContentRequest): + The request object. Request message for + [GenAiCacheService.CreateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.CreateCachedContent]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gca_cached_content.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_transcoded_request( + http_options, request + ) + + body = _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": 
dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.CreateCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "CreateCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + GenAiCacheServiceRestTransport._CreateCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_cached_content.CachedContent() + pb_resp = gca_cached_content.CachedContent.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_cached_content(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_cached_content_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_cached_content.CachedContent.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceClient.create_cached_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "CreateCachedContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return 
hash("GenAiCacheServiceRestTransport.DeleteCachedContent") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: gen_ai_cache_service.DeleteCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.DeleteCachedContentRequest): + The request object. Request message for + [GenAiCacheService.DeleteCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.DeleteCachedContent]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.DeleteCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "DeleteCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + GenAiCacheServiceRestTransport._DeleteCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.GetCachedContent") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: gen_ai_cache_service.GetCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> cached_content.CachedContent: + r"""Call the get cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.GetCachedContentRequest): + The request object. Request message for + [GenAiCacheService.GetCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.GetCachedContent]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.cached_content.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. 
+ + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._GetCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = cached_content.CachedContent() + pb_resp = cached_content.CachedContent.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_cached_content(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_cached_content_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = cached_content.CachedContent.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceClient.get_cached_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetCachedContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListCachedContents( + _BaseGenAiCacheServiceRestTransport._BaseListCachedContents, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.ListCachedContents") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: gen_ai_cache_service.ListCachedContentsRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gen_ai_cache_service.ListCachedContentsResponse: + r"""Call the list cached contents method over HTTP. + + Args: + request (~.gen_ai_cache_service.ListCachedContentsRequest): + The request object. Request to list CachedContents. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gen_ai_cache_service.ListCachedContentsResponse: + Response with a list of + CachedContents. + + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_cached_contents( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.ListCachedContents", + extra={ + "serviceName": 
"google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListCachedContents", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._ListCachedContents._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gen_ai_cache_service.ListCachedContentsResponse() + pb_resp = gen_ai_cache_service.ListCachedContentsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_cached_contents(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_cached_contents_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + gen_ai_cache_service.ListCachedContentsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceClient.list_cached_contents", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListCachedContents", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.UpdateCachedContent") + + @staticmethod + def _get_response( + host, + metadata, + 
query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: gen_ai_cache_service.UpdateCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Call the update cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.UpdateCachedContentRequest): + The request object. Request message for + [GenAiCacheService.UpdateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.UpdateCachedContent]. + Only expire_time or ttl can be updated. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gca_cached_content.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. 
+ + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_transcoded_request( + http_options, request + ) + + body = _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.UpdateCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "UpdateCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + GenAiCacheServiceRestTransport._UpdateCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_cached_content.CachedContent() + pb_resp = gca_cached_content.CachedContent.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_cached_content(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_cached_content_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_cached_content.CachedContent.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceClient.update_cached_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "UpdateCachedContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + @property + def create_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.CreateCachedContentRequest], + gca_cached_content.CachedContent, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_cached_content( + self, + ) -> Callable[[gen_ai_cache_service.DeleteCachedContentRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.GetCachedContentRequest], cached_content.CachedContent + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_cached_contents( + self, + ) -> Callable[ + [gen_ai_cache_service.ListCachedContentsRequest], + gen_ai_cache_service.ListCachedContentsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListCachedContents(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.UpdateCachedContentRequest], + gca_cached_content.CachedContent, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation( + _BaseGenAiCacheServiceRestTransport._BaseGetLocation, GenAiCacheServiceRestStub + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.GetLocation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetLocation._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_location(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseGenAiCacheServiceRestTransport._BaseListLocations, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.ListLocations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + 
r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseListLocations._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._ListLocations._get_response( + 
self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy( + _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy, GenAiCacheServiceRestStub + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: 
iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetIamPolicy", 
+ "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy( + _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy, GenAiCacheServiceRestStub + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, 
+ params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_http_options() + ) + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) + + body = _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": 
request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "SetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions( + _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + 
transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "TestIamPermissions", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseGenAiCacheServiceRestTransport._BaseCancelOperation, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.CancelOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), 
+ ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseCancelOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.CancelOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + 
self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.DeleteOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._DeleteOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseGenAiCacheServiceRestTransport._BaseGetOperation, GenAiCacheServiceRestStub + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.GetOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseGenAiCacheServiceRestTransport._BaseListOperations, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.ListOperations") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> 
operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseListOperations._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = 
GenAiCacheServiceRestTransport._ListOperations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation( + _BaseGenAiCacheServiceRestTransport._BaseWaitOperation, + GenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("GenAiCacheServiceRestTransport.WaitOperation") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + 
return response + + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseWaitOperation._get_http_options() + ) + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseWaitOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseWaitOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.WaitOperation", + extra={ + "serviceName": 
"google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "WaitOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = GenAiCacheServiceRestTransport._WaitOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + content = response.content.decode("utf-8") + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = self._interceptor.post_wait_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "WaitOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("GenAiCacheServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py new file mode 100644 index 0000000000..1dbc81be84 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_asyncio.py @@ -0,0 +1,3036 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import google.auth + +try: + import aiohttp # type: ignore + from google.auth.aio.transport.sessions import AsyncAuthorizedSession # type: ignore + from google.api_core import rest_streaming_async # type: ignore + from google.api_core.operations_v1 import AsyncOperationsRestClient # type: ignore +except ImportError as e: # pragma: NO COVER + raise ImportError( + "`rest_asyncio` transport requires the library to be installed with the `async_rest` extra. Install the library with the `async_rest` extra using `pip install google-cloud-aiplatform[async_rest]`" + ) from e + +from google.auth.aio import credentials as ga_credentials_async # type: ignore + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from google.api_core import retry_async as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming_async # type: ignore +import google.protobuf + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore + +import json # type: ignore +import dataclasses +from typing import Any, Dict, List, Callable, Tuple, Optional, Sequence, Union + + +from google.cloud.aiplatform_v1.types import cached_content 
+from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +from .rest_base import _BaseGenAiCacheServiceRestTransport + +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +import logging + +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"google-auth@{google.auth.__version__}", +) + +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + + +class AsyncGenAiCacheServiceRestInterceptor: + """Asynchronous Interceptor for GenAiCacheService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the AsyncGenAiCacheServiceRestTransport. + + .. 
code-block:: python + class MyCustomGenAiCacheServiceInterceptor(GenAiCacheServiceRestInterceptor): + async def pre_create_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_create_cached_content(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_delete_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def pre_get_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_get_cached_content(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_list_cached_contents(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_list_cached_contents(self, response): + logging.log(f"Received response: {response}") + return response + + async def pre_update_cached_content(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_update_cached_content(self, response): + logging.log(f"Received response: {response}") + return response + + transport = AsyncGenAiCacheServiceRestTransport(interceptor=MyCustomGenAiCacheServiceInterceptor()) + client = async GenAiCacheServiceClient(transport=transport) + + + """ + + async def pre_create_cached_content( + self, + request: gen_ai_cache_service.CreateCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.CreateCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. 
+ """ + return request, metadata + + async def post_create_cached_content( + self, response: gca_cached_content.CachedContent + ) -> gca_cached_content.CachedContent: + """Post-rpc interceptor for create_cached_content + + DEPRECATED. Please use the `post_create_cached_content_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. This `post_create_cached_content` interceptor runs + before the `post_create_cached_content_with_metadata` interceptor. + """ + return response + + async def post_create_cached_content_with_metadata( + self, + response: gca_cached_content.CachedContent, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_cached_content.CachedContent, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_cached_content + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_create_cached_content_with_metadata` + interceptor in new development instead of the `post_create_cached_content` interceptor. + When both interceptors are used, this `post_create_cached_content_with_metadata` interceptor runs after the + `post_create_cached_content` interceptor. The (possibly modified) response returned by + `post_create_cached_content` will be passed to + `post_create_cached_content_with_metadata`. 
+ """ + return response, metadata + + async def pre_delete_cached_content( + self, + request: gen_ai_cache_service.DeleteCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.DeleteCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def pre_get_cached_content( + self, + request: gen_ai_cache_service.GetCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.GetCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_get_cached_content( + self, response: cached_content.CachedContent + ) -> cached_content.CachedContent: + """Post-rpc interceptor for get_cached_content + + DEPRECATED. Please use the `post_get_cached_content_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. This `post_get_cached_content` interceptor runs + before the `post_get_cached_content_with_metadata` interceptor. 
+ """ + return response + + async def post_get_cached_content_with_metadata( + self, + response: cached_content.CachedContent, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[cached_content.CachedContent, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_cached_content + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_get_cached_content_with_metadata` + interceptor in new development instead of the `post_get_cached_content` interceptor. + When both interceptors are used, this `post_get_cached_content_with_metadata` interceptor runs after the + `post_get_cached_content` interceptor. The (possibly modified) response returned by + `post_get_cached_content` will be passed to + `post_get_cached_content_with_metadata`. + """ + return response, metadata + + async def pre_list_cached_contents( + self, + request: gen_ai_cache_service.ListCachedContentsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.ListCachedContentsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_cached_contents + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_list_cached_contents( + self, response: gen_ai_cache_service.ListCachedContentsResponse + ) -> gen_ai_cache_service.ListCachedContentsResponse: + """Post-rpc interceptor for list_cached_contents + + DEPRECATED. Please use the `post_list_cached_contents_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
This `post_list_cached_contents` interceptor runs + before the `post_list_cached_contents_with_metadata` interceptor. + """ + return response + + async def post_list_cached_contents_with_metadata( + self, + response: gen_ai_cache_service.ListCachedContentsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.ListCachedContentsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_cached_contents + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_list_cached_contents_with_metadata` + interceptor in new development instead of the `post_list_cached_contents` interceptor. + When both interceptors are used, this `post_list_cached_contents_with_metadata` interceptor runs after the + `post_list_cached_contents` interceptor. The (possibly modified) response returned by + `post_list_cached_contents` will be passed to + `post_list_cached_contents_with_metadata`. + """ + return response, metadata + + async def pre_update_cached_content( + self, + request: gen_ai_cache_service.UpdateCachedContentRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gen_ai_cache_service.UpdateCachedContentRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_cached_content + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_update_cached_content( + self, response: gca_cached_content.CachedContent + ) -> gca_cached_content.CachedContent: + """Post-rpc interceptor for update_cached_content + + DEPRECATED. Please use the `post_update_cached_content_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. This `post_update_cached_content` interceptor runs + before the `post_update_cached_content_with_metadata` interceptor. + """ + return response + + async def post_update_cached_content_with_metadata( + self, + response: gca_cached_content.CachedContent, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_cached_content.CachedContent, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_cached_content + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiCacheService server but before it is returned to user code. + + We recommend only using this `post_update_cached_content_with_metadata` + interceptor in new development instead of the `post_update_cached_content` interceptor. + When both interceptors are used, this `post_update_cached_content_with_metadata` interceptor runs after the + `post_update_cached_content` interceptor. The (possibly modified) response returned by + `post_update_cached_content` will be passed to + `post_update_cached_content_with_metadata`. + """ + return response, metadata + + async def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.GetLocationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. 
+ """ + return request, metadata + + async def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + locations_pb2.ListLocationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_get_iam_policy( + self, response: policy_pb2.Policy + ) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
+ """ + return response + + async def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_set_iam_policy( + self, response: policy_pb2.Policy + ) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
+ """ + return response + + async def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. 
+ """ + return request, metadata + + async def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. + """ + return response + + async def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiCacheService server. + """ + return request, metadata + + async def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiCacheService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class AsyncGenAiCacheServiceRestStub: + _session: AsyncAuthorizedSession + _host: str + _interceptor: AsyncGenAiCacheServiceRestInterceptor + + +class AsyncGenAiCacheServiceRestTransport(_BaseGenAiCacheServiceRestTransport): + """Asynchronous REST backend transport for GenAiCacheService. + + Service for managing Vertex AI's CachedContent resource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials_async.Credentials] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + url_scheme: str = "https", + interceptor: Optional[AsyncGenAiCacheServiceRestInterceptor] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.aio.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + url_scheme (str): the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=False, + url_scheme=url_scheme, + api_audience=None, + ) + self._session = AsyncAuthorizedSession(self._credentials) # type: ignore + self._interceptor = interceptor or AsyncGenAiCacheServiceRestInterceptor() + self._wrap_with_kind = True + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_cached_content: self._wrap_method( + self.create_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.get_cached_content: self._wrap_method( + self.get_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.update_cached_content: self._wrap_method( + self.update_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.delete_cached_content: self._wrap_method( + self.delete_cached_content, + default_timeout=None, + client_info=client_info, + ), + self.list_cached_contents: self._wrap_method( + self.list_cached_contents, + default_timeout=None, + client_info=client_info, + ), + self.get_location: self._wrap_method( + self.get_location, + default_timeout=None, + client_info=client_info, + ), + self.list_locations: self._wrap_method( + self.list_locations, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: self._wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: self._wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: self._wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.cancel_operation: self._wrap_method( + self.cancel_operation, + default_timeout=None, + 
client_info=client_info, + ), + self.delete_operation: self._wrap_method( + self.delete_operation, + default_timeout=None, + client_info=client_info, + ), + self.get_operation: self._wrap_method( + self.get_operation, + default_timeout=None, + client_info=client_info, + ), + self.list_operations: self._wrap_method( + self.list_operations, + default_timeout=None, + client_info=client_info, + ), + self.wait_operation: self._wrap_method( + self.wait_operation, + default_timeout=None, + client_info=client_info, + ), + } + + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + + class _CreateCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.CreateCachedContent") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: gen_ai_cache_service.CreateCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Call the create cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.CreateCachedContentRequest): + The request object. 
Request message for + [GenAiCacheService.CreateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.CreateCachedContent]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gca_cached_content.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_http_options() + ) + + request, metadata = await self._interceptor.pre_create_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_transcoded_request( + http_options, request + ) + + body = _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.CreateCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + 
"rpcName": "CreateCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._CreateCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = gca_cached_content.CachedContent() + pb_resp = gca_cached_content.CachedContent.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_create_cached_content(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_cached_content_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_cached_content.CachedContent.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.create_cached_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "CreateCachedContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _DeleteCachedContent( + 
_BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.DeleteCachedContent") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: gen_ai_cache_service.DeleteCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.DeleteCachedContentRequest): + The request object. Request message for + [GenAiCacheService.DeleteCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.DeleteCachedContent]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_http_options() + ) + + request, metadata = await self._interceptor.pre_delete_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.DeleteCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "DeleteCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._DeleteCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + class _GetCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.GetCachedContent") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: gen_ai_cache_service.GetCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> cached_content.CachedContent: + r"""Call the get cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.GetCachedContentRequest): + The request object. Request message for + [GenAiCacheService.GetCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.GetCachedContent]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.cached_content.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. + + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._GetCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = cached_content.CachedContent() + pb_resp = cached_content.CachedContent.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_get_cached_content(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_cached_content_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = cached_content.CachedContent.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.get_cached_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetCachedContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _ListCachedContents( + _BaseGenAiCacheServiceRestTransport._BaseListCachedContents, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.ListCachedContents") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = 
"application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: gen_ai_cache_service.ListCachedContentsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gen_ai_cache_service.ListCachedContentsResponse: + r"""Call the list cached contents method over HTTP. + + Args: + request (~.gen_ai_cache_service.ListCachedContentsRequest): + The request object. Request to list CachedContents. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gen_ai_cache_service.ListCachedContentsResponse: + Response with a list of + CachedContents. 
+ + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_cached_contents( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.ListCachedContents", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListCachedContents", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._ListCachedContents._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = gen_ai_cache_service.ListCachedContentsResponse() + pb_resp = gen_ai_cache_service.ListCachedContentsResponse.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_list_cached_contents(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_cached_contents_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + gen_ai_cache_service.ListCachedContentsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.list_cached_contents", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListCachedContents", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + class _UpdateCachedContent( + _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.UpdateCachedContent") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: gen_ai_cache_service.UpdateCachedContentRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> gca_cached_content.CachedContent: + r"""Call the update cached content method over HTTP. + + Args: + request (~.gen_ai_cache_service.UpdateCachedContentRequest): + The request object. Request message for + [GenAiCacheService.UpdateCachedContent][google.cloud.aiplatform.v1.GenAiCacheService.UpdateCachedContent]. + Only expire_time or ttl can be updated. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.gca_cached_content.CachedContent: + A resource used in LLM queries for + users to explicitly specify what to + cache and how to cache. 
+ + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_http_options() + ) + + request, metadata = await self._interceptor.pre_update_cached_content( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_transcoded_request( + http_options, request + ) + + body = _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.UpdateCachedContent", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "UpdateCachedContent", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._UpdateCachedContent._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = gca_cached_content.CachedContent() + pb_resp = gca_cached_content.CachedContent.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_update_cached_content(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_cached_content_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gca_cached_content.CachedContent.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.update_cached_content", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "UpdateCachedContent", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + @property + def create_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.CreateCachedContentRequest], + gca_cached_content.CachedContent, + ]: + return self._CreateCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_cached_content( + self, + ) -> Callable[[gen_ai_cache_service.DeleteCachedContentRequest], empty_pb2.Empty]: + return self._DeleteCachedContent(self._session, self._host, self._interceptor) # type: 
ignore + + @property + def get_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.GetCachedContentRequest], cached_content.CachedContent + ]: + return self._GetCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_cached_contents( + self, + ) -> Callable[ + [gen_ai_cache_service.ListCachedContentsRequest], + gen_ai_cache_service.ListCachedContentsResponse, + ]: + return self._ListCachedContents(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_cached_content( + self, + ) -> Callable[ + [gen_ai_cache_service.UpdateCachedContentRequest], + gca_cached_content.CachedContent, + ]: + return self._UpdateCachedContent(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation( + _BaseGenAiCacheServiceRestTransport._BaseGetLocation, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.GetLocation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.Location: + r"""Call the get location method over HTTP. 
+ + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.Location: Response from GetLocation method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetLocation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_location( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetLocation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetLocation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetLocation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._GetLocation._get_response( + self._host, + metadata, + query_params, + 
self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = locations_pb2.Location() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_location(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.GetLocation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetLocation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations( + _BaseGenAiCacheServiceRestTransport._BaseListLocations, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.ListLocations") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + 
"{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseListLocations._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_locations( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseListLocations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseListLocations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListLocations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._ListLocations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_list_locations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.ListLocations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListLocations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy( + _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.GetIamPolicy") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, 
strict=True), + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_iam_policy( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetIamPolicy", + 
extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.GetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy( + _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.SetIamPolicy") + + 
@staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_set_iam_policy( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) + + body = _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = policy_pb2.Policy() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.SetIamPolicy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "SetIamPolicy", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions( + _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.TestIamPermissions") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, 
strict=True), + ) + return response + + async def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions._get_http_options() + ) + + request, metadata = await self._interceptor.pre_test_iam_permissions( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.TestIamPermissions", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "TestIamPermissions", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation( + _BaseGenAiCacheServiceRestTransport._BaseCancelOperation, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.CancelOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseCancelOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_cancel_operation( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseCancelOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseCancelOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.CancelOperation", + 
extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._CancelOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + return await self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation( + _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.DeleteOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, 
bytes]]] = (), + ) -> None: + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_delete_operation( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseDeleteOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.DeleteOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncGenAiCacheServiceRestTransport._DeleteOperation._get_response( + self._host, 
+ metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + return await self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation( + _BaseGenAiCacheServiceRestTransport._BaseGetOperation, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.GetOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseGetOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_get_operation( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseGetOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseGetOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._GetOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.GetOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations( + _BaseGenAiCacheServiceRestTransport._BaseListOperations, + AsyncGenAiCacheServiceRestStub, + ): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.ListOperations") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) 
+ return response + + async def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. + """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseListOperations._get_http_options() + ) + + request, metadata = await self._interceptor.pre_list_operations( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseListOperations._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseListOperations._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.GenAiCacheServiceClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._ListOperations._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.ListOperations", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation( + _BaseGenAiCacheServiceRestTransport._BaseWaitOperation, + AsyncGenAiCacheServiceRestStub, + 
): + def __hash__(self): + return hash("AsyncGenAiCacheServiceRestTransport.WaitOperation") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. 
+ """ + + http_options = ( + _BaseGenAiCacheServiceRestTransport._BaseWaitOperation._get_http_options() + ) + + request, metadata = await self._interceptor.pre_wait_operation( + request, metadata + ) + transcoded_request = _BaseGenAiCacheServiceRestTransport._BaseWaitOperation._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseGenAiCacheServiceRestTransport._BaseWaitOperation._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.GenAiCacheServiceClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "WaitOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + await AsyncGenAiCacheServiceRestTransport._WaitOperation._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + content = await response.read() + resp = operations_pb2.Operation() + resp = json_format.Parse(content, resp) + resp = await self._interceptor.post_wait_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.GenAiCacheServiceAsyncClient.WaitOperation", + extra={ + "serviceName": "google.cloud.aiplatform.v1.GenAiCacheService", + "rpcName": "WaitOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) + return resp + + @property + def kind(self) -> str: + return "rest_asyncio" + + async def close(self): + await self._session.close() diff --git a/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py new file mode 100644 index 0000000000..6c840c183b --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_cache_service/transports/rest_base.py @@ -0,0 +1,2730 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from .base import GenAiCacheServiceTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.aiplatform_v1.types import cached_content +from google.cloud.aiplatform_v1.types import cached_content as gca_cached_content +from google.cloud.aiplatform_v1.types import gen_ai_cache_service +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseGenAiCacheServiceRestTransport(GenAiCacheServiceTransport): + """Base REST backend transport for GenAiCacheService. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseCreateCachedContent: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/cachedContents", + "body": "cached_content", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = gen_ai_cache_service.CreateCachedContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseGenAiCacheServiceRestTransport._BaseCreateCachedContent._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + 
class _BaseDeleteCachedContent: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/cachedContents/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = gen_ai_cache_service.DeleteCachedContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseGenAiCacheServiceRestTransport._BaseDeleteCachedContent._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetCachedContent: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/cachedContents/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = gen_ai_cache_service.GetCachedContentRequest.pb(request) + transcoded_request = 
path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseGenAiCacheServiceRestTransport._BaseGetCachedContent._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListCachedContents: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/cachedContents", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = gen_ai_cache_service.ListCachedContentsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseGenAiCacheServiceRestTransport._BaseListCachedContents._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateCachedContent: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def 
_get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1/{cached_content.name=projects/*/locations/*/cachedContents/*}", + "body": "cached_content", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = gen_ai_cache_service.UpdateCachedContentRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseGenAiCacheServiceRestTransport._BaseUpdateCachedContent._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetLocation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = 
json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListLocations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + body = json.dumps(transcoded_request["body"]) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseTestIamPermissions: + def 
__hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = 
json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseCancelOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseDeleteOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, 
+ { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseGetOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseListOperations: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", 
+ }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + class _BaseWaitOperation: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + return query_params + + +__all__ = ("_BaseGenAiCacheServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py index 5d13bd02e9..5ce83d83aa 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py index 2aeb8bee8e..87262e71d0 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import genai_tuning_service @@ -56,8 +55,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport from .client import 
GenAiTuningServiceClient @@ -134,7 +135,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: GenAiTuningServiceAsyncClient: The constructed client. """ - return GenAiTuningServiceClient.from_service_account_info.__func__(GenAiTuningServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + GenAiTuningServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(GenAiTuningServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -150,7 +154,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: GenAiTuningServiceAsyncClient: The constructed client. """ - return GenAiTuningServiceClient.from_service_account_file.__func__(GenAiTuningServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + GenAiTuningServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(GenAiTuningServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -295,21 +302,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", 
"" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", + "credentialsType": None, + } + ), ) async def create_tuning_job( @@ -392,7 +401,10 @@ async def sample_create_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tuning_job]) + flattened_params = [parent, tuning_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -502,7 +514,10 @@ async def sample_get_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -617,7 +632,10 @@ async def sample_list_tuning_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -741,7 +759,10 @@ async def sample_cancel_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -869,7 +890,10 @@ async def sample_rebase_tuned_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, tuned_model_ref]) + flattened_params = [parent, tuned_model_ref] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1630,5 +1654,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("GenAiTuningServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py index 358b1687ea..a7dba1e309 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import genai_tuning_service @@ -70,8 +71,10 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import GenAiTuningServiceGrpcTransport from .transports.grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport @@ -171,6 +174,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. 
+ + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -448,12 +479,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = GenAiTuningServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -461,7 +488,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. 
client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -493,20 +520,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = GenAiTuningServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -599,6 +620,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -691,11 +739,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = GenAiTuningServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + GenAiTuningServiceClient._read_environment_variables() + ) self._client_cert_source = GenAiTuningServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -808,21 +854,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.GenAiTuningServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + 
), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", + "credentialsType": None, + } + ), ) def create_tuning_job( @@ -905,7 +955,10 @@ def sample_create_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tuning_job]) + flattened_params = [parent, tuning_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1012,7 +1065,10 @@ def sample_get_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1124,7 +1180,10 @@ def sample_list_tuning_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1245,7 +1304,10 @@ def sample_cancel_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1370,7 +1432,10 @@ def sample_rebase_tuned_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tuned_model_ref]) + flattened_params = [parent, tuned_model_ref] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1477,16 +1542,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1532,16 +1601,20 @@ def get_operation( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1704,16 +1777,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1825,16 +1902,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1947,16 +2028,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2007,16 +2092,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2062,16 +2151,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2117,21 +2210,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("GenAiTuningServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py index 68e0c51bda..b50b5dcca6 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py index f39229fd6e..a24ba52e16 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py index fd37761f90..901c5b262f 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job @@ -34,12 +35,15 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class GenAiTuningServiceTransport(abc.ABC): """Abstract transport class for GenAiTuningService.""" @@ -71,9 +75,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -86,8 +91,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -102,11 +105,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): @@ -300,13 +308,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py index 799ecd7474..d27e9ee313 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -38,7 +38,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO try: @@ -78,12 +78,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.GenAiTuningService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -160,9 +159,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -296,9 +296,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. 
A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py index a194c5d5a6..7b27f6ab7a 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,7 +41,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO from .grpc import GenAiTuningServiceGrpcTransport @@ -156,8 +156,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -208,9 +209,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py index e815cd02ac..0c4e4a836b 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -39,7 +40,7 @@ from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -66,6 +67,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class GenAiTuningServiceRestInterceptor: """Interceptor for GenAiTuningService. @@ -159,12 +163,35 @@ def post_create_tuning_job( ) -> gca_tuning_job.TuningJob: """Post-rpc interceptor for create_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_create_tuning_job` interceptor runs + before the `post_create_tuning_job_with_metadata` interceptor. """ return response + def post_create_tuning_job_with_metadata( + self, + response: gca_tuning_job.TuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_tuning_job.TuningJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. 
+ + We recommend only using this `post_create_tuning_job_with_metadata` + interceptor in new development instead of the `post_create_tuning_job` interceptor. + When both interceptors are used, this `post_create_tuning_job_with_metadata` interceptor runs after the + `post_create_tuning_job` interceptor. The (possibly modified) response returned by + `post_create_tuning_job` will be passed to + `post_create_tuning_job_with_metadata`. + """ + return response, metadata + def pre_get_tuning_job( self, request: genai_tuning_service.GetTuningJobRequest, @@ -185,12 +212,35 @@ def post_get_tuning_job( ) -> tuning_job.TuningJob: """Post-rpc interceptor for get_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_get_tuning_job` interceptor runs + before the `post_get_tuning_job_with_metadata` interceptor. """ return response + def post_get_tuning_job_with_metadata( + self, + response: tuning_job.TuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[tuning_job.TuningJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_get_tuning_job_with_metadata` + interceptor in new development instead of the `post_get_tuning_job` interceptor. + When both interceptors are used, this `post_get_tuning_job_with_metadata` interceptor runs after the + `post_get_tuning_job` interceptor. The (possibly modified) response returned by + `post_get_tuning_job` will be passed to + `post_get_tuning_job_with_metadata`. 
+ """ + return response, metadata + def pre_list_tuning_jobs( self, request: genai_tuning_service.ListTuningJobsRequest, @@ -211,12 +261,38 @@ def post_list_tuning_jobs( ) -> genai_tuning_service.ListTuningJobsResponse: """Post-rpc interceptor for list_tuning_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_tuning_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_list_tuning_jobs` interceptor runs + before the `post_list_tuning_jobs_with_metadata` interceptor. """ return response + def post_list_tuning_jobs_with_metadata( + self, + response: genai_tuning_service.ListTuningJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + genai_tuning_service.ListTuningJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_tuning_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_list_tuning_jobs_with_metadata` + interceptor in new development instead of the `post_list_tuning_jobs` interceptor. + When both interceptors are used, this `post_list_tuning_jobs_with_metadata` interceptor runs after the + `post_list_tuning_jobs` interceptor. The (possibly modified) response returned by + `post_list_tuning_jobs` will be passed to + `post_list_tuning_jobs_with_metadata`. + """ + return response, metadata + def pre_rebase_tuned_model( self, request: genai_tuning_service.RebaseTunedModelRequest, @@ -237,12 +313,35 @@ def post_rebase_tuned_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for rebase_tuned_model - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_rebase_tuned_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_rebase_tuned_model` interceptor runs + before the `post_rebase_tuned_model_with_metadata` interceptor. """ return response + def post_rebase_tuned_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for rebase_tuned_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_rebase_tuned_model_with_metadata` + interceptor in new development instead of the `post_rebase_tuned_model` interceptor. + When both interceptors are used, this `post_rebase_tuned_model_with_metadata` interceptor runs after the + `post_rebase_tuned_model` interceptor. The (possibly modified) response returned by + `post_rebase_tuned_model` will be passed to + `post_rebase_tuned_model_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -532,9 +631,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. 
This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -745,6 +845,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -881,6 +985,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -889,6 +997,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -929,6 +1045,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1091,6 +1211,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1119,6 +1243,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1231,6 +1359,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1239,6 +1371,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1299,6 +1439,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1461,6 
+1605,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1501,6 +1649,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1613,6 +1765,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1621,6 +1777,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1681,6 +1845,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ 
{ @@ -1839,6 +2007,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1879,6 +2051,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1991,6 +2167,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2011,6 +2195,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2059,6 +2247,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ 
-2225,6 +2417,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2257,6 +2453,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2365,6 +2565,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2373,6 +2577,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2433,6 +2645,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2534,7 +2750,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2699,6 +2915,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_tuning_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2844,6 +3064,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_tuning_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2991,6 +3215,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_tuning_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_tuning_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3105,7 +3333,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3145,6 +3373,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_rebase_tuned_model(resp) + response_metadata = [(k, 
str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_rebase_tuned_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3257,7 +3489,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -3400,7 +3631,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3543,7 +3773,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -3687,7 +3916,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -3835,7 +4063,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -3982,7 +4209,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4099,7 +4325,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4216,7 +4441,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. 
Args: @@ -4359,7 +4583,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4502,7 +4725,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py index 32d7faf2e7..5d64547d39 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -53,7 +53,7 @@ from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -84,6 +84,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncGenAiTuningServiceRestInterceptor: """Asynchronous Interceptor for GenAiTuningService. @@ -177,12 +180,35 @@ async def post_create_tuning_job( ) -> gca_tuning_job.TuningJob: """Post-rpc interceptor for create_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_create_tuning_job` interceptor runs + before the `post_create_tuning_job_with_metadata` interceptor. 
""" return response + async def post_create_tuning_job_with_metadata( + self, + response: gca_tuning_job.TuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_tuning_job.TuningJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_create_tuning_job_with_metadata` + interceptor in new development instead of the `post_create_tuning_job` interceptor. + When both interceptors are used, this `post_create_tuning_job_with_metadata` interceptor runs after the + `post_create_tuning_job` interceptor. The (possibly modified) response returned by + `post_create_tuning_job` will be passed to + `post_create_tuning_job_with_metadata`. + """ + return response, metadata + async def pre_get_tuning_job( self, request: genai_tuning_service.GetTuningJobRequest, @@ -203,12 +229,35 @@ async def post_get_tuning_job( ) -> tuning_job.TuningJob: """Post-rpc interceptor for get_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_get_tuning_job` interceptor runs + before the `post_get_tuning_job_with_metadata` interceptor. 
""" return response + async def post_get_tuning_job_with_metadata( + self, + response: tuning_job.TuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[tuning_job.TuningJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_get_tuning_job_with_metadata` + interceptor in new development instead of the `post_get_tuning_job` interceptor. + When both interceptors are used, this `post_get_tuning_job_with_metadata` interceptor runs after the + `post_get_tuning_job` interceptor. The (possibly modified) response returned by + `post_get_tuning_job` will be passed to + `post_get_tuning_job_with_metadata`. + """ + return response, metadata + async def pre_list_tuning_jobs( self, request: genai_tuning_service.ListTuningJobsRequest, @@ -229,12 +278,38 @@ async def post_list_tuning_jobs( ) -> genai_tuning_service.ListTuningJobsResponse: """Post-rpc interceptor for list_tuning_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_tuning_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_list_tuning_jobs` interceptor runs + before the `post_list_tuning_jobs_with_metadata` interceptor. 
""" return response + async def post_list_tuning_jobs_with_metadata( + self, + response: genai_tuning_service.ListTuningJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + genai_tuning_service.ListTuningJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_tuning_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_list_tuning_jobs_with_metadata` + interceptor in new development instead of the `post_list_tuning_jobs` interceptor. + When both interceptors are used, this `post_list_tuning_jobs_with_metadata` interceptor runs after the + `post_list_tuning_jobs` interceptor. The (possibly modified) response returned by + `post_list_tuning_jobs` will be passed to + `post_list_tuning_jobs_with_metadata`. + """ + return response, metadata + async def pre_rebase_tuned_model( self, request: genai_tuning_service.RebaseTunedModelRequest, @@ -255,12 +330,35 @@ async def post_rebase_tuned_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for rebase_tuned_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_rebase_tuned_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the GenAiTuningService server but before - it is returned to user code. + it is returned to user code. This `post_rebase_tuned_model` interceptor runs + before the `post_rebase_tuned_model_with_metadata` interceptor. 
""" return response + async def post_rebase_tuned_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for rebase_tuned_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the GenAiTuningService server but before it is returned to user code. + + We recommend only using this `post_rebase_tuned_model_with_metadata` + interceptor in new development instead of the `post_rebase_tuned_model` interceptor. + When both interceptors are used, this `post_rebase_tuned_model_with_metadata` interceptor runs after the + `post_rebase_tuned_model` interceptor. The (possibly modified) response returned by + `post_rebase_tuned_model` will be passed to + `post_rebase_tuned_model_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -569,9 +667,9 @@ def __init__( self._interceptor = interceptor or AsyncGenAiTuningServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -740,7 +838,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -916,6 +1014,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_tuning_job(resp) + response_metadata = [(k, str(v)) 
for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_tuning_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1071,6 +1173,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_tuning_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1224,6 +1330,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_tuning_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_tuning_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1339,7 +1449,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1386,6 +1496,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_rebase_tuned_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_rebase_tuned_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1581,6 +1695,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1717,6 +1835,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1725,6 +1847,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1765,6 +1895,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1927,6 +2061,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1955,6 +2093,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2067,6 +2209,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2075,6 +2221,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2135,6 +2289,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2297,6 +2455,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2337,6 +2499,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2449,6 +2615,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2457,6 +2627,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2517,6 +2695,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2675,6 +2857,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2715,6 +2901,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2827,6 +3017,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2847,6 +3045,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2895,6 +3097,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3061,6 +3267,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3093,6 +3303,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3201,6 +3415,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3209,6 +3427,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3269,6 +3495,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3366,7 +3596,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. 
Args: @@ -3519,7 +3748,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3672,7 +3900,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -3826,7 +4053,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -3984,7 +4210,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4135,7 +4360,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4258,7 +4482,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4381,7 +4604,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4534,7 +4756,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4685,7 +4906,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py index 6a4f2f875c..77a742039b 100644 --- a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ from google.cloud.aiplatform_v1.types import genai_tuning_service from google.cloud.aiplatform_v1.types import tuning_job from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -482,6 +482,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -568,6 +572,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -646,6 +655,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -827,6 +840,10 @@ def _get_http_options(): "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -963,6 +980,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -971,6 +992,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1011,6 +1040,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1192,6 +1225,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1220,6 +1257,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1332,6 +1373,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1340,6 +1385,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1400,6 +1453,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1581,6 +1638,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1621,6 +1682,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ 
-1733,6 +1798,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1741,6 +1810,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1801,6 +1878,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1978,6 +2059,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2018,6 +2103,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2130,6 +2219,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + 
}, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2150,6 +2247,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2198,6 +2299,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2383,6 +2488,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2415,6 +2524,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2523,6 +2636,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2531,6 +2648,14 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2591,6 +2716,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py index 0b0b2780e1..0cde92313e 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index c9e5fbef72..18ac885345 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import index_endpoint @@ -57,9 +56,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport from .client import IndexEndpointServiceClient @@ -134,7 +135,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: IndexEndpointServiceAsyncClient: The constructed client. 
""" - return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + IndexEndpointServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(IndexEndpointServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -150,7 +154,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: IndexEndpointServiceAsyncClient: The constructed client. """ - return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + IndexEndpointServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -295,21 +302,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + 
"credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", + "credentialsType": None, + } + ), ) async def create_index_endpoint( @@ -398,7 +407,10 @@ async def sample_create_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) + flattened_params = [parent, index_endpoint] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -520,7 +532,10 @@ async def sample_get_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -635,7 +650,10 @@ async def sample_list_index_endpoints(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -768,7 +786,10 @@ async def sample_update_index_endpoint(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) + flattened_params = [index_endpoint, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -897,7 +918,10 @@ async def sample_delete_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1037,7 +1061,10 @@ async def sample_deploy_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) + flattened_params = [index_endpoint, deployed_index] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1177,7 +1204,10 @@ async def sample_undeploy_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([index_endpoint, deployed_index_id]) + flattened_params = [index_endpoint, deployed_index_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1323,7 +1353,10 @@ async def sample_mutate_deployed_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) + flattened_params = [index_endpoint, deployed_index] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2086,5 +2119,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("IndexEndpointServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py index c392d30ce9..b67a668a9f 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import index_endpoint @@ -71,9 +72,11 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import IndexEndpointServiceGrpcTransport from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport @@ -173,6 +176,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns 
whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -404,12 +435,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = IndexEndpointServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -417,7 +444,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. 
client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -449,20 +476,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = IndexEndpointServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -555,6 +576,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -647,11 +695,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = IndexEndpointServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + IndexEndpointServiceClient._read_environment_variables() + ) self._client_cert_source = IndexEndpointServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -764,21 +810,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.IndexEndpointServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", + "universeDomain": getattr( + self._transport._credentials, 
"universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", + "credentialsType": None, + } + ), ) def create_index_endpoint( @@ -867,7 +917,10 @@ def sample_create_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) + flattened_params = [parent, index_endpoint] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -986,7 +1039,10 @@ def sample_get_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1098,7 +1154,10 @@ def sample_list_index_endpoints(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1228,7 +1287,10 @@ def sample_update_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) + flattened_params = [index_endpoint, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1354,7 +1416,10 @@ def sample_delete_index_endpoint(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1491,7 +1556,10 @@ def sample_deploy_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([index_endpoint, deployed_index]) + flattened_params = [index_endpoint, deployed_index] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1628,7 +1696,10 @@ def sample_undeploy_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index_id]) + flattened_params = [index_endpoint, deployed_index_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1771,7 +1842,10 @@ def sample_mutate_deployed_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) + flattened_params = [index_endpoint, deployed_index] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1880,16 +1954,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1935,16 +2013,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -2107,16 +2189,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -2228,16 +2314,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2350,16 +2440,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2410,16 +2504,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2465,16 +2563,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2520,21 +2622,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("IndexEndpointServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py index 34371d5b22..ce0894c9a7 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py index 948f919d8a..9026c2c90d 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py index 52975c2bba..02a248601e 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import index_endpoint from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint @@ -39,6 +40,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class IndexEndpointServiceTransport(abc.ABC): """Abstract transport class for IndexEndpointService.""" @@ -70,9 +74,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -85,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -101,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -344,13 +352,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py index 57d8db4850..8fe97d578b 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not 
use this file except in compliance with the License. @@ -77,12 +77,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.IndexEndpointService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -159,9 +158,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -295,9 +295,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py index 2ed580ff3e..cd95a791fe 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -155,8 +155,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -207,9 +208,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. 
+ This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py index bd5d20e18e..971b0614c4 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -65,6 +66,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class IndexEndpointServiceRestInterceptor: """Interceptor for IndexEndpointService. @@ -171,12 +175,35 @@ def post_create_index_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. 
This `post_create_index_endpoint` interceptor runs + before the `post_create_index_endpoint_with_metadata` interceptor. """ return response + def post_create_index_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_create_index_endpoint_with_metadata` + interceptor in new development instead of the `post_create_index_endpoint` interceptor. + When both interceptors are used, this `post_create_index_endpoint_with_metadata` interceptor runs after the + `post_create_index_endpoint` interceptor. The (possibly modified) response returned by + `post_create_index_endpoint` will be passed to + `post_create_index_endpoint_with_metadata`. + """ + return response, metadata + def pre_delete_index_endpoint( self, request: index_endpoint_service.DeleteIndexEndpointRequest, @@ -197,12 +224,35 @@ def post_delete_index_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_delete_index_endpoint` interceptor runs + before the `post_delete_index_endpoint_with_metadata` interceptor. 
""" return response + def post_delete_index_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_delete_index_endpoint_with_metadata` + interceptor in new development instead of the `post_delete_index_endpoint` interceptor. + When both interceptors are used, this `post_delete_index_endpoint_with_metadata` interceptor runs after the + `post_delete_index_endpoint` interceptor. The (possibly modified) response returned by + `post_delete_index_endpoint` will be passed to + `post_delete_index_endpoint_with_metadata`. + """ + return response, metadata + def pre_deploy_index( self, request: index_endpoint_service.DeployIndexRequest, @@ -223,12 +273,35 @@ def post_deploy_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for deploy_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_deploy_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_deploy_index` interceptor runs + before the `post_deploy_index_with_metadata` interceptor. 
""" return response + def post_deploy_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for deploy_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_deploy_index_with_metadata` + interceptor in new development instead of the `post_deploy_index` interceptor. + When both interceptors are used, this `post_deploy_index_with_metadata` interceptor runs after the + `post_deploy_index` interceptor. The (possibly modified) response returned by + `post_deploy_index` will be passed to + `post_deploy_index_with_metadata`. + """ + return response, metadata + def pre_get_index_endpoint( self, request: index_endpoint_service.GetIndexEndpointRequest, @@ -249,12 +322,35 @@ def post_get_index_endpoint( ) -> index_endpoint.IndexEndpoint: """Post-rpc interceptor for get_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_get_index_endpoint` interceptor runs + before the `post_get_index_endpoint_with_metadata` interceptor. 
""" return response + def post_get_index_endpoint_with_metadata( + self, + response: index_endpoint.IndexEndpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[index_endpoint.IndexEndpoint, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_get_index_endpoint_with_metadata` + interceptor in new development instead of the `post_get_index_endpoint` interceptor. + When both interceptors are used, this `post_get_index_endpoint_with_metadata` interceptor runs after the + `post_get_index_endpoint` interceptor. The (possibly modified) response returned by + `post_get_index_endpoint` will be passed to + `post_get_index_endpoint_with_metadata`. + """ + return response, metadata + def pre_list_index_endpoints( self, request: index_endpoint_service.ListIndexEndpointsRequest, @@ -275,12 +371,38 @@ def post_list_index_endpoints( ) -> index_endpoint_service.ListIndexEndpointsResponse: """Post-rpc interceptor for list_index_endpoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_index_endpoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_list_index_endpoints` interceptor runs + before the `post_list_index_endpoints_with_metadata` interceptor. 
""" return response + def post_list_index_endpoints_with_metadata( + self, + response: index_endpoint_service.ListIndexEndpointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_endpoint_service.ListIndexEndpointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_index_endpoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_list_index_endpoints_with_metadata` + interceptor in new development instead of the `post_list_index_endpoints` interceptor. + When both interceptors are used, this `post_list_index_endpoints_with_metadata` interceptor runs after the + `post_list_index_endpoints` interceptor. The (possibly modified) response returned by + `post_list_index_endpoints` will be passed to + `post_list_index_endpoints_with_metadata`. + """ + return response, metadata + def pre_mutate_deployed_index( self, request: index_endpoint_service.MutateDeployedIndexRequest, @@ -301,12 +423,35 @@ def post_mutate_deployed_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for mutate_deployed_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_deployed_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_mutate_deployed_index` interceptor runs + before the `post_mutate_deployed_index_with_metadata` interceptor. 
""" return response + def post_mutate_deployed_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for mutate_deployed_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_mutate_deployed_index_with_metadata` + interceptor in new development instead of the `post_mutate_deployed_index` interceptor. + When both interceptors are used, this `post_mutate_deployed_index_with_metadata` interceptor runs after the + `post_mutate_deployed_index` interceptor. The (possibly modified) response returned by + `post_mutate_deployed_index` will be passed to + `post_mutate_deployed_index_with_metadata`. + """ + return response, metadata + def pre_undeploy_index( self, request: index_endpoint_service.UndeployIndexRequest, @@ -327,12 +472,35 @@ def post_undeploy_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for undeploy_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_undeploy_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_undeploy_index` interceptor runs + before the `post_undeploy_index_with_metadata` interceptor. 
""" return response + def post_undeploy_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for undeploy_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_undeploy_index_with_metadata` + interceptor in new development instead of the `post_undeploy_index` interceptor. + When both interceptors are used, this `post_undeploy_index_with_metadata` interceptor runs after the + `post_undeploy_index` interceptor. The (possibly modified) response returned by + `post_undeploy_index` will be passed to + `post_undeploy_index_with_metadata`. + """ + return response, metadata + def pre_update_index_endpoint( self, request: index_endpoint_service.UpdateIndexEndpointRequest, @@ -353,12 +521,37 @@ def post_update_index_endpoint( ) -> gca_index_endpoint.IndexEndpoint: """Post-rpc interceptor for update_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_update_index_endpoint` interceptor runs + before the `post_update_index_endpoint_with_metadata` interceptor. 
""" return response + def post_update_index_endpoint_with_metadata( + self, + response: gca_index_endpoint.IndexEndpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_index_endpoint.IndexEndpoint, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_update_index_endpoint_with_metadata` + interceptor in new development instead of the `post_update_index_endpoint` interceptor. + When both interceptors are used, this `post_update_index_endpoint_with_metadata` interceptor runs after the + `post_update_index_endpoint` interceptor. The (possibly modified) response returned by + `post_update_index_endpoint` will be passed to + `post_update_index_endpoint_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -648,9 +841,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -861,6 +1055,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -997,6 +1195,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1005,6 +1207,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1045,6 +1255,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1207,6 +1421,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + 
{ + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1235,6 +1453,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1347,6 +1569,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1355,6 +1581,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1415,6 +1649,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1577,6 +1815,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1617,6 +1859,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1729,6 +1975,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1737,6 +1987,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1797,6 +2055,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -1955,6 +2217,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1995,6 +2261,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2107,6 +2377,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2127,6 +2405,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2175,6 +2457,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2341,6 +2627,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2373,6 +2663,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2481,6 +2775,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2489,6 +2787,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2549,6 +2855,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2657,7 +2967,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2699,6 +3009,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2806,7 +3120,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2847,6 +3161,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2957,7 +3275,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2997,6 +3315,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_deploy_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_deploy_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and 
_LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3147,6 +3469,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3296,6 +3622,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_index_endpoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_index_endpoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3412,7 +3742,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3454,6 +3784,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_mutate_deployed_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_mutate_deployed_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3564,7 +3898,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3604,6 +3938,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_undeploy_index(resp) + response_metadata = 
[(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_undeploy_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3760,6 +4098,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3909,7 +4251,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4052,7 +4393,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4195,7 +4535,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4339,7 +4678,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4487,7 +4825,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4634,7 +4971,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -4751,7 +5087,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4868,7 +5203,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -5011,7 +5345,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5154,7 +5487,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py index 8c4035f19e..015ca51aa4 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -83,6 +83,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncIndexEndpointServiceRestInterceptor: """Asynchronous Interceptor for IndexEndpointService. @@ -189,12 +192,35 @@ async def post_create_index_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_create_index_endpoint` interceptor runs + before the `post_create_index_endpoint_with_metadata` interceptor. """ return response + async def post_create_index_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_create_index_endpoint_with_metadata` + interceptor in new development instead of the `post_create_index_endpoint` interceptor. 
+ When both interceptors are used, this `post_create_index_endpoint_with_metadata` interceptor runs after the + `post_create_index_endpoint` interceptor. The (possibly modified) response returned by + `post_create_index_endpoint` will be passed to + `post_create_index_endpoint_with_metadata`. + """ + return response, metadata + async def pre_delete_index_endpoint( self, request: index_endpoint_service.DeleteIndexEndpointRequest, @@ -215,12 +241,35 @@ async def post_delete_index_endpoint( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_delete_index_endpoint` interceptor runs + before the `post_delete_index_endpoint_with_metadata` interceptor. """ return response + async def post_delete_index_endpoint_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_delete_index_endpoint_with_metadata` + interceptor in new development instead of the `post_delete_index_endpoint` interceptor. + When both interceptors are used, this `post_delete_index_endpoint_with_metadata` interceptor runs after the + `post_delete_index_endpoint` interceptor. The (possibly modified) response returned by + `post_delete_index_endpoint` will be passed to + `post_delete_index_endpoint_with_metadata`. 
+ """ + return response, metadata + async def pre_deploy_index( self, request: index_endpoint_service.DeployIndexRequest, @@ -241,12 +290,35 @@ async def post_deploy_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for deploy_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_deploy_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_deploy_index` interceptor runs + before the `post_deploy_index_with_metadata` interceptor. """ return response + async def post_deploy_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for deploy_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_deploy_index_with_metadata` + interceptor in new development instead of the `post_deploy_index` interceptor. + When both interceptors are used, this `post_deploy_index_with_metadata` interceptor runs after the + `post_deploy_index` interceptor. The (possibly modified) response returned by + `post_deploy_index` will be passed to + `post_deploy_index_with_metadata`. + """ + return response, metadata + async def pre_get_index_endpoint( self, request: index_endpoint_service.GetIndexEndpointRequest, @@ -267,12 +339,35 @@ async def post_get_index_endpoint( ) -> index_endpoint.IndexEndpoint: """Post-rpc interceptor for get_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_index_endpoint_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_get_index_endpoint` interceptor runs + before the `post_get_index_endpoint_with_metadata` interceptor. """ return response + async def post_get_index_endpoint_with_metadata( + self, + response: index_endpoint.IndexEndpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[index_endpoint.IndexEndpoint, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_get_index_endpoint_with_metadata` + interceptor in new development instead of the `post_get_index_endpoint` interceptor. + When both interceptors are used, this `post_get_index_endpoint_with_metadata` interceptor runs after the + `post_get_index_endpoint` interceptor. The (possibly modified) response returned by + `post_get_index_endpoint` will be passed to + `post_get_index_endpoint_with_metadata`. + """ + return response, metadata + async def pre_list_index_endpoints( self, request: index_endpoint_service.ListIndexEndpointsRequest, @@ -293,12 +388,38 @@ async def post_list_index_endpoints( ) -> index_endpoint_service.ListIndexEndpointsResponse: """Post-rpc interceptor for list_index_endpoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_index_endpoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_list_index_endpoints` interceptor runs + before the `post_list_index_endpoints_with_metadata` interceptor. 
""" return response + async def post_list_index_endpoints_with_metadata( + self, + response: index_endpoint_service.ListIndexEndpointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_endpoint_service.ListIndexEndpointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_index_endpoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_list_index_endpoints_with_metadata` + interceptor in new development instead of the `post_list_index_endpoints` interceptor. + When both interceptors are used, this `post_list_index_endpoints_with_metadata` interceptor runs after the + `post_list_index_endpoints` interceptor. The (possibly modified) response returned by + `post_list_index_endpoints` will be passed to + `post_list_index_endpoints_with_metadata`. + """ + return response, metadata + async def pre_mutate_deployed_index( self, request: index_endpoint_service.MutateDeployedIndexRequest, @@ -319,12 +440,35 @@ async def post_mutate_deployed_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for mutate_deployed_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_deployed_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_mutate_deployed_index` interceptor runs + before the `post_mutate_deployed_index_with_metadata` interceptor. 
""" return response + async def post_mutate_deployed_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for mutate_deployed_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_mutate_deployed_index_with_metadata` + interceptor in new development instead of the `post_mutate_deployed_index` interceptor. + When both interceptors are used, this `post_mutate_deployed_index_with_metadata` interceptor runs after the + `post_mutate_deployed_index` interceptor. The (possibly modified) response returned by + `post_mutate_deployed_index` will be passed to + `post_mutate_deployed_index_with_metadata`. + """ + return response, metadata + async def pre_undeploy_index( self, request: index_endpoint_service.UndeployIndexRequest, @@ -345,12 +489,35 @@ async def post_undeploy_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for undeploy_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_undeploy_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_undeploy_index` interceptor runs + before the `post_undeploy_index_with_metadata` interceptor. 
""" return response + async def post_undeploy_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for undeploy_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_undeploy_index_with_metadata` + interceptor in new development instead of the `post_undeploy_index` interceptor. + When both interceptors are used, this `post_undeploy_index_with_metadata` interceptor runs after the + `post_undeploy_index` interceptor. The (possibly modified) response returned by + `post_undeploy_index` will be passed to + `post_undeploy_index_with_metadata`. + """ + return response, metadata + async def pre_update_index_endpoint( self, request: index_endpoint_service.UpdateIndexEndpointRequest, @@ -371,12 +538,37 @@ async def post_update_index_endpoint( ) -> gca_index_endpoint.IndexEndpoint: """Post-rpc interceptor for update_index_endpoint - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_index_endpoint_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexEndpointService server but before - it is returned to user code. + it is returned to user code. This `post_update_index_endpoint` interceptor runs + before the `post_update_index_endpoint_with_metadata` interceptor. 
""" return response + async def post_update_index_endpoint_with_metadata( + self, + response: gca_index_endpoint.IndexEndpoint, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_index_endpoint.IndexEndpoint, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_index_endpoint + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexEndpointService server but before it is returned to user code. + + We recommend only using this `post_update_index_endpoint_with_metadata` + interceptor in new development instead of the `post_update_index_endpoint` interceptor. + When both interceptors are used, this `post_update_index_endpoint_with_metadata` interceptor runs after the + `post_update_index_endpoint` interceptor. The (possibly modified) response returned by + `post_update_index_endpoint` will be passed to + `post_update_index_endpoint_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -685,9 +877,9 @@ def __init__( self._interceptor = interceptor or AsyncIndexEndpointServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -878,7 +1070,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -925,6 +1117,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_create_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1033,7 +1229,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1079,6 +1275,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1192,7 +1392,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1241,6 +1441,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_deploy_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_deploy_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1395,6 +1599,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] + resp, _ = await self._interceptor.post_get_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1548,6 +1756,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_index_endpoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_index_endpoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1665,7 +1877,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1712,6 +1924,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_mutate_deployed_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_mutate_deployed_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1825,7 +2041,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1872,6 +2088,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_undeploy_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_undeploy_index_with_metadata( + resp, 
response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2032,6 +2252,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_index_endpoint(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_index_endpoint_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2229,6 +2453,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2365,6 +2593,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2373,6 +2605,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2413,6 +2653,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", 
"uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2575,6 +2819,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2603,6 +2851,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2715,6 +2967,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2723,6 +2979,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2783,6 +3047,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2945,6 +3213,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2985,6 +3257,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -3097,6 +3373,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3105,6 +3385,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3165,6 +3453,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3323,6 +3615,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3363,6 +3659,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3475,6 +3775,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3495,6 +3803,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3543,6 +3855,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3709,6 +4025,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3741,6 +4061,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3849,6 +4173,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3857,6 +4185,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3917,6 +4253,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -4043,7 +4383,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4196,7 +4535,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4347,7 +4685,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4499,7 +4836,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4655,7 +4991,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4806,7 +5141,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4929,7 +5263,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5052,7 +5385,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. 
Args: @@ -5203,7 +5535,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5354,7 +5685,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py index 2a4ce2e09c..1104824ad0 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -644,6 +644,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -730,6 +734,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -808,6 +817,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -989,6 +1002,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1125,6 +1142,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1133,6 +1154,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1173,6 +1202,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1354,6 +1387,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1382,6 +1419,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1494,6 +1535,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1502,6 +1547,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1562,6 +1615,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1743,6 +1800,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1783,6 +1844,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1895,6 +1960,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1903,6 +1972,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1963,6 +2040,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2140,6 +2221,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2180,6 +2265,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2292,6 +2381,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2312,6 +2409,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2360,6 +2461,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2545,6 +2650,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2577,6 +2686,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2685,6 +2798,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2693,6 +2810,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2753,6 +2878,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/index_service/__init__.py b/google/cloud/aiplatform_v1/services/index_service/__init__.py index aca05a8166..e9750e7565 100644 --- a/google/cloud/aiplatform_v1/services/index_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/index_service/__init__.py @@ -1,5 
+1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/index_service/async_client.py b/google/cloud/aiplatform_v1/services/index_service/async_client.py index aae705c1d8..287ded362b 100644 --- a/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_service import pagers from google.cloud.aiplatform_v1.types import deployed_index_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -57,10 +56,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as 
operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport from .client import IndexServiceClient @@ -129,7 +130,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: IndexServiceAsyncClient: The constructed client. """ - return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + IndexServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(IndexServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -145,7 +149,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: IndexServiceAsyncClient: The constructed client. 
""" - return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + IndexServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(IndexServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -286,21 +293,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.IndexServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.IndexService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.IndexService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.IndexService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.IndexService", + "credentialsType": None, + } + ), ) async def create_index( @@ -386,7 +395,10 @@ async def sample_create_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, index]) + flattened_params = [parent, index] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -506,7 +518,10 @@ async def sample_get_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -619,7 +634,10 @@ async def sample_list_indexes(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -757,7 +775,10 @@ async def sample_update_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index, update_mask]) + flattened_params = [index, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -894,7 +915,10 @@ async def sample_delete_index(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1825,5 +1849,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("IndexServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/index_service/client.py b/google/cloud/aiplatform_v1/services/index_service/client.py index f05d36dbcb..09b22de5d9 100644 --- a/google/cloud/aiplatform_v1/services/index_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.index_service import pagers from google.cloud.aiplatform_v1.types import deployed_index_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -71,10 +72,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import IndexServiceGrpcTransport from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport @@ -174,6 +177,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE 
= "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -383,12 +414,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = IndexServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -396,7 +423,7 @@ def 
get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -428,20 +455,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = IndexServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -534,6 +555,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -622,11 +670,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = IndexServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + IndexServiceClient._read_environment_variables() + ) self._client_cert_source = IndexServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -735,21 +781,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.IndexServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.IndexService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.IndexService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.IndexService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": 
f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.IndexService", + "credentialsType": None, + } + ), ) def create_index( @@ -835,7 +885,10 @@ def sample_create_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) + flattened_params = [parent, index] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -952,7 +1005,10 @@ def sample_get_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1062,7 +1118,10 @@ def sample_list_indexes(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1197,7 +1256,10 @@ def sample_update_index(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([index, update_mask]) + flattened_params = [index, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1331,7 +1393,10 @@ def sample_delete_index(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1604,16 +1669,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1659,16 +1728,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1831,16 +1904,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1952,16 +2029,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2074,16 +2155,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2134,16 +2219,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2189,16 +2278,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2244,21 +2337,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("IndexServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/index_service/pagers.py b/google/cloud/aiplatform_v1/services/index_service/pagers.py index fcaffeb57b..34df5adebe 100644 --- a/google/cloud/aiplatform_v1/services/index_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/index_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py index 4867c107dd..7e67aeafd7 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_service/transports/base.py index bf950b22e4..5dd9cd7775 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import index from google.cloud.aiplatform_v1.types import index_service @@ -38,6 +39,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class IndexServiceTransport(abc.ABC): """Abstract transport class for IndexService.""" @@ -69,9 +73,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -84,8 +89,6 @@ def __init__( be used for service account credentials. 
""" - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -100,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -331,13 +339,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py index 81c04a3dc5..5aa1afb6c7 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -76,12 +76,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.IndexService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -159,9 +158,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -295,9 +295,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py index 701c0d40b2..bb6419019e 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -155,8 +155,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -207,9 +208,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py index f96b259ec2..aac621ff0f 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -64,6 +65,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class IndexServiceRestInterceptor: """Interceptor for IndexService. @@ -161,12 +165,35 @@ def post_create_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_create_index` interceptor runs + before the `post_create_index_with_metadata` interceptor. 
""" return response + def post_create_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_create_index_with_metadata` + interceptor in new development instead of the `post_create_index` interceptor. + When both interceptors are used, this `post_create_index_with_metadata` interceptor runs after the + `post_create_index` interceptor. The (possibly modified) response returned by + `post_create_index` will be passed to + `post_create_index_with_metadata`. + """ + return response, metadata + def pre_delete_index( self, request: index_service.DeleteIndexRequest, @@ -186,12 +213,35 @@ def post_delete_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_delete_index` interceptor runs + before the `post_delete_index_with_metadata` interceptor. """ return response + def post_delete_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. 
+ + We recommend only using this `post_delete_index_with_metadata` + interceptor in new development instead of the `post_delete_index` interceptor. + When both interceptors are used, this `post_delete_index_with_metadata` interceptor runs after the + `post_delete_index` interceptor. The (possibly modified) response returned by + `post_delete_index` will be passed to + `post_delete_index_with_metadata`. + """ + return response, metadata + def pre_get_index( self, request: index_service.GetIndexRequest, @@ -207,12 +257,33 @@ def pre_get_index( def post_get_index(self, response: index.Index) -> index.Index: """Post-rpc interceptor for get_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_get_index` interceptor runs + before the `post_get_index_with_metadata` interceptor. """ return response + def post_get_index_with_metadata( + self, response: index.Index, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[index.Index, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_get_index_with_metadata` + interceptor in new development instead of the `post_get_index` interceptor. + When both interceptors are used, this `post_get_index_with_metadata` interceptor runs after the + `post_get_index` interceptor. The (possibly modified) response returned by + `post_get_index` will be passed to + `post_get_index_with_metadata`. 
+ """ + return response, metadata + def pre_list_indexes( self, request: index_service.ListIndexesRequest, @@ -232,12 +303,37 @@ def post_list_indexes( ) -> index_service.ListIndexesResponse: """Post-rpc interceptor for list_indexes - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_indexes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_list_indexes` interceptor runs + before the `post_list_indexes_with_metadata` interceptor. """ return response + def post_list_indexes_with_metadata( + self, + response: index_service.ListIndexesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_service.ListIndexesResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_indexes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_list_indexes_with_metadata` + interceptor in new development instead of the `post_list_indexes` interceptor. + When both interceptors are used, this `post_list_indexes_with_metadata` interceptor runs after the + `post_list_indexes` interceptor. The (possibly modified) response returned by + `post_list_indexes` will be passed to + `post_list_indexes_with_metadata`. + """ + return response, metadata + def pre_remove_datapoints( self, request: index_service.RemoveDatapointsRequest, @@ -257,12 +353,37 @@ def post_remove_datapoints( ) -> index_service.RemoveDatapointsResponse: """Post-rpc interceptor for remove_datapoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_remove_datapoints_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_remove_datapoints` interceptor runs + before the `post_remove_datapoints_with_metadata` interceptor. """ return response + def post_remove_datapoints_with_metadata( + self, + response: index_service.RemoveDatapointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_service.RemoveDatapointsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for remove_datapoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_remove_datapoints_with_metadata` + interceptor in new development instead of the `post_remove_datapoints` interceptor. + When both interceptors are used, this `post_remove_datapoints_with_metadata` interceptor runs after the + `post_remove_datapoints` interceptor. The (possibly modified) response returned by + `post_remove_datapoints` will be passed to + `post_remove_datapoints_with_metadata`. + """ + return response, metadata + def pre_update_index( self, request: index_service.UpdateIndexRequest, @@ -282,12 +403,35 @@ def post_update_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_update_index` interceptor runs + before the `post_update_index_with_metadata` interceptor. 
""" return response + def post_update_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_update_index_with_metadata` + interceptor in new development instead of the `post_update_index` interceptor. + When both interceptors are used, this `post_update_index_with_metadata` interceptor runs after the + `post_update_index` interceptor. The (possibly modified) response returned by + `post_update_index` will be passed to + `post_update_index_with_metadata`. + """ + return response, metadata + def pre_upsert_datapoints( self, request: index_service.UpsertDatapointsRequest, @@ -307,12 +451,37 @@ def post_upsert_datapoints( ) -> index_service.UpsertDatapointsResponse: """Post-rpc interceptor for upsert_datapoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_upsert_datapoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_upsert_datapoints` interceptor runs + before the `post_upsert_datapoints_with_metadata` interceptor. 
""" return response + def post_upsert_datapoints_with_metadata( + self, + response: index_service.UpsertDatapointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_service.UpsertDatapointsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for upsert_datapoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_upsert_datapoints_with_metadata` + interceptor in new development instead of the `post_upsert_datapoints` interceptor. + When both interceptors are used, this `post_upsert_datapoints_with_metadata` interceptor runs after the + `post_upsert_datapoints` interceptor. The (possibly modified) response returned by + `post_upsert_datapoints` will be passed to + `post_upsert_datapoints_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -603,9 +772,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -816,6 +986,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -952,6 +1126,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -960,6 +1138,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1000,6 +1186,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1162,6 +1352,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { 
+ "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1190,6 +1384,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1302,6 +1500,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1310,6 +1512,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1370,6 +1580,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1532,6 +1746,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1572,6 +1790,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1684,6 +1906,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1692,6 +1918,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1752,6 +1986,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -1910,6 +2148,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1950,6 +2192,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2062,6 +2308,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2082,6 +2336,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2130,6 +2388,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2296,6 +2558,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2328,6 +2594,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2436,6 +2706,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2444,6 +2718,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2504,6 +2786,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2615,7 +2901,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2655,6 +2941,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2763,7 +3053,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2802,6 +3092,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2950,6 +3244,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3098,6 +3396,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_indexes(resp) + response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] + resp, _ = self._interceptor.post_list_indexes_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3252,6 +3554,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_remove_datapoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_remove_datapoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3369,7 +3675,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3409,6 +3715,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3561,6 +3871,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_upsert_datapoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_upsert_datapoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3687,7 +4001,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. 
Args: @@ -3833,7 +4146,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3975,7 +4287,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4120,7 +4431,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4271,7 +4581,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4415,7 +4724,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4531,7 +4839,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4647,7 +4954,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4791,7 +5097,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4933,7 +5238,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py index 0802cb3570..aa82cc32a7 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -82,6 +82,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncIndexServiceRestInterceptor: """Asynchronous Interceptor for IndexService. @@ -179,12 +182,35 @@ async def post_create_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_create_index` interceptor runs + before the `post_create_index_with_metadata` interceptor. 
""" return response + async def post_create_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_create_index_with_metadata` + interceptor in new development instead of the `post_create_index` interceptor. + When both interceptors are used, this `post_create_index_with_metadata` interceptor runs after the + `post_create_index` interceptor. The (possibly modified) response returned by + `post_create_index` will be passed to + `post_create_index_with_metadata`. + """ + return response, metadata + async def pre_delete_index( self, request: index_service.DeleteIndexRequest, @@ -204,12 +230,35 @@ async def post_delete_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_delete_index` interceptor runs + before the `post_delete_index_with_metadata` interceptor. """ return response + async def post_delete_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. 
+ + We recommend only using this `post_delete_index_with_metadata` + interceptor in new development instead of the `post_delete_index` interceptor. + When both interceptors are used, this `post_delete_index_with_metadata` interceptor runs after the + `post_delete_index` interceptor. The (possibly modified) response returned by + `post_delete_index` will be passed to + `post_delete_index_with_metadata`. + """ + return response, metadata + async def pre_get_index( self, request: index_service.GetIndexRequest, @@ -225,12 +274,33 @@ async def pre_get_index( async def post_get_index(self, response: index.Index) -> index.Index: """Post-rpc interceptor for get_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_get_index` interceptor runs + before the `post_get_index_with_metadata` interceptor. """ return response + async def post_get_index_with_metadata( + self, response: index.Index, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[index.Index, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_get_index_with_metadata` + interceptor in new development instead of the `post_get_index` interceptor. + When both interceptors are used, this `post_get_index_with_metadata` interceptor runs after the + `post_get_index` interceptor. The (possibly modified) response returned by + `post_get_index` will be passed to + `post_get_index_with_metadata`. 
+ """ + return response, metadata + async def pre_list_indexes( self, request: index_service.ListIndexesRequest, @@ -250,12 +320,37 @@ async def post_list_indexes( ) -> index_service.ListIndexesResponse: """Post-rpc interceptor for list_indexes - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_indexes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_list_indexes` interceptor runs + before the `post_list_indexes_with_metadata` interceptor. """ return response + async def post_list_indexes_with_metadata( + self, + response: index_service.ListIndexesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_service.ListIndexesResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_indexes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_list_indexes_with_metadata` + interceptor in new development instead of the `post_list_indexes` interceptor. + When both interceptors are used, this `post_list_indexes_with_metadata` interceptor runs after the + `post_list_indexes` interceptor. The (possibly modified) response returned by + `post_list_indexes` will be passed to + `post_list_indexes_with_metadata`. + """ + return response, metadata + async def pre_remove_datapoints( self, request: index_service.RemoveDatapointsRequest, @@ -275,12 +370,37 @@ async def post_remove_datapoints( ) -> index_service.RemoveDatapointsResponse: """Post-rpc interceptor for remove_datapoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_remove_datapoints_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_remove_datapoints` interceptor runs + before the `post_remove_datapoints_with_metadata` interceptor. """ return response + async def post_remove_datapoints_with_metadata( + self, + response: index_service.RemoveDatapointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_service.RemoveDatapointsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for remove_datapoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_remove_datapoints_with_metadata` + interceptor in new development instead of the `post_remove_datapoints` interceptor. + When both interceptors are used, this `post_remove_datapoints_with_metadata` interceptor runs after the + `post_remove_datapoints` interceptor. The (possibly modified) response returned by + `post_remove_datapoints` will be passed to + `post_remove_datapoints_with_metadata`. + """ + return response, metadata + async def pre_update_index( self, request: index_service.UpdateIndexRequest, @@ -300,12 +420,35 @@ async def post_update_index( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_index - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_index_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_update_index` interceptor runs + before the `post_update_index_with_metadata` interceptor. 
""" return response + async def post_update_index_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_index + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_update_index_with_metadata` + interceptor in new development instead of the `post_update_index` interceptor. + When both interceptors are used, this `post_update_index_with_metadata` interceptor runs after the + `post_update_index` interceptor. The (possibly modified) response returned by + `post_update_index` will be passed to + `post_update_index_with_metadata`. + """ + return response, metadata + async def pre_upsert_datapoints( self, request: index_service.UpsertDatapointsRequest, @@ -325,12 +468,37 @@ async def post_upsert_datapoints( ) -> index_service.UpsertDatapointsResponse: """Post-rpc interceptor for upsert_datapoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_upsert_datapoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the IndexService server but before - it is returned to user code. + it is returned to user code. This `post_upsert_datapoints` interceptor runs + before the `post_upsert_datapoints_with_metadata` interceptor. 
""" return response + async def post_upsert_datapoints_with_metadata( + self, + response: index_service.UpsertDatapointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + index_service.UpsertDatapointsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for upsert_datapoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the IndexService server but before it is returned to user code. + + We recommend only using this `post_upsert_datapoints_with_metadata` + interceptor in new development instead of the `post_upsert_datapoints` interceptor. + When both interceptors are used, this `post_upsert_datapoints_with_metadata` interceptor runs after the + `post_upsert_datapoints` interceptor. The (possibly modified) response returned by + `post_upsert_datapoints` will be passed to + `post_upsert_datapoints_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -640,9 +808,9 @@ def __init__( self._interceptor = interceptor or AsyncIndexServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -833,7 +1001,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -880,6 +1048,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_index(resp) + response_metadata = 
[(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -991,7 +1163,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1037,6 +1209,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1193,6 +1369,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1349,6 +1529,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_indexes(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_indexes_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1511,6 +1695,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_remove_datapoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_remove_datapoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1631,7 +1819,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1678,6 +1866,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_index(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_index_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1838,6 +2030,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_upsert_datapoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_upsert_datapoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2035,6 +2231,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2171,6 +2371,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2179,6 +2383,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2219,6 +2431,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2381,6 +2597,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2409,6 +2629,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ 
-2521,6 +2745,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2529,6 +2757,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2589,6 +2825,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2751,6 +2991,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2791,6 +3035,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2903,6 +3151,10 
@@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2911,6 +3163,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2971,6 +3231,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -3129,6 +3393,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3169,6 +3437,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3281,6 +3553,14 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3301,6 +3581,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3349,6 +3633,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3515,6 +3803,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3547,6 +3839,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3655,6 +3951,10 @@ def operations_client(self) -> AsyncOperationsRestClient: 
"method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3663,6 +3963,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3723,6 +4031,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3828,7 +4140,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -3982,7 +4293,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4134,7 +4444,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4287,7 +4596,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -4447,7 +4755,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4599,7 +4906,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4723,7 +5029,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4847,7 +5152,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4999,7 +5303,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5151,7 +5454,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py index da8f8a44c5..30d09701db 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -584,6 +584,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -670,6 +674,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -748,6 +757,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -929,6 +942,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1065,6 +1082,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1073,6 +1094,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1113,6 +1142,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1294,6 +1327,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1322,6 +1359,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1434,6 +1475,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1442,6 +1487,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1502,6 +1555,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1683,6 +1740,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1723,6 +1784,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1835,6 +1900,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1843,6 +1912,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1903,6 +1980,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2080,6 +2161,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2120,6 +2205,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2232,6 +2321,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2252,6 +2349,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2300,6 +2401,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2485,6 +2590,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2517,6 +2626,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2625,6 +2738,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2633,6 +2750,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2693,6 +2818,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/job_service/__init__.py b/google/cloud/aiplatform_v1/services/job_service/__init__.py index 38a2234134..89e72b6794 100644 --- a/google/cloud/aiplatform_v1/services/job_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/job_service/__init__.py @@ -1,5 +1,5 @@ 
# -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 3fd9161648..7692ce2451 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.job_service import pagers from google.cloud.aiplatform_v1.types import batch_prediction_job from google.cloud.aiplatform_v1.types import ( @@ -81,13 +80,15 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: 
ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.money_pb2 as money_pb2 # type: ignore from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport from .client import JobServiceClient @@ -152,6 +153,10 @@ class JobServiceAsyncClient: ) network_path = staticmethod(JobServiceClient.network_path) parse_network_path = staticmethod(JobServiceClient.parse_network_path) + network_attachment_path = staticmethod(JobServiceClient.network_attachment_path) + parse_network_attachment_path = staticmethod( + JobServiceClient.parse_network_attachment_path + ) notification_channel_path = staticmethod(JobServiceClient.notification_channel_path) parse_notification_channel_path = staticmethod( JobServiceClient.parse_notification_channel_path @@ -198,7 +203,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: JobServiceAsyncClient: The constructed client. """ - return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + JobServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(JobServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -214,7 +222,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: JobServiceAsyncClient: The constructed client. 
""" - return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + JobServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(JobServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -355,21 +366,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.JobServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.JobService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.JobService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.JobService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.JobService", + "credentialsType": None, + } + ), ) async def create_custom_job( @@ -456,7 +469,10 @@ async def sample_create_custom_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, custom_job]) + flattened_params = [parent, custom_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -572,7 +588,10 @@ async def sample_get_custom_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -685,7 +704,10 @@ async def sample_list_custom_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -819,7 +841,10 @@ async def sample_delete_custom_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -938,7 +963,10 @@ async def sample_cancel_custom_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1061,7 +1089,10 @@ async def sample_create_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) + flattened_params = [parent, data_labeling_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1172,7 +1203,10 @@ async def sample_get_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1284,7 +1318,10 @@ async def sample_list_data_labeling_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1418,7 +1455,10 @@ async def sample_delete_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1526,7 +1566,10 @@ async def sample_cancel_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1658,7 +1701,10 @@ async def sample_create_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, hyperparameter_tuning_job]) + flattened_params = [parent, hyperparameter_tuning_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1773,7 +1819,10 @@ async def sample_get_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1888,7 +1937,10 @@ async def sample_list_hyperparameter_tuning_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2024,7 +2076,10 @@ async def sample_delete_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2147,7 +2202,10 @@ async def sample_cancel_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2266,7 +2324,10 @@ async def sample_create_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, nas_job]) + flattened_params = [parent, nas_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2376,7 +2437,10 @@ async def sample_get_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2489,7 +2553,10 @@ async def sample_list_nas_jobs(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2623,7 +2690,10 @@ async def sample_delete_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2742,7 +2812,10 @@ async def sample_cancel_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2850,7 +2923,10 @@ async def sample_get_nas_trial_detail(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2962,7 +3038,10 @@ async def sample_list_nas_trial_details(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3108,7 +3187,10 @@ async def sample_create_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, batch_prediction_job]) + flattened_params = [parent, batch_prediction_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3224,7 +3306,10 @@ async def sample_get_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3339,7 +3424,10 @@ async def sample_list_batch_prediction_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3476,7 +3564,10 @@ async def sample_delete_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3597,7 +3688,10 @@ async def sample_cancel_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3724,7 +3818,10 @@ async def sample_create_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_deployment_monitoring_job]) + flattened_params = [parent, model_deployment_monitoring_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3855,7 +3952,10 @@ async def sample_search_model_deployment_monitoring_stats_anomalies(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + flattened_params = [model_deployment_monitoring_job, deployed_model_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3993,7 +4093,10 @@ async def sample_get_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4108,7 +4211,10 @@ async def sample_list_model_deployment_monitoring_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4243,18 +4349,17 @@ async def sample_update_model_deployment_monitoring_job(): Updatable fields: - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -4279,7 +4384,10 @@ async def sample_update_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + flattened_params = [model_deployment_monitoring_job, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4423,7 +4531,10 @@ async def sample_delete_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4538,7 +4649,10 @@ async def sample_pause_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4641,7 +4755,10 @@ async def sample_resume_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -5391,5 +5508,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("JobServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 92c13b8bd2..da26b8089b 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.job_service import pagers from google.cloud.aiplatform_v1.types import batch_prediction_job from google.cloud.aiplatform_v1.types import ( @@ -95,13 +96,15 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore +import google.type.money_pb2 as money_pb2 # type: ignore from 
.transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import JobServiceGrpcTransport from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport @@ -199,6 +202,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -511,6 +542,28 @@ def parse_network_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_attachment_path( + project: str, + region: str, + networkattachment: str, + ) -> str: + """Returns a fully-qualified network_attachment string.""" + return "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format( + 
project=project, + region=region, + networkattachment=networkattachment, + ) + + @staticmethod + def parse_network_attachment_path(path: str) -> Dict[str, str]: + """Parses a network_attachment path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/regions/(?P.+?)/networkAttachments/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def notification_channel_path( project: str, @@ -741,12 +794,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = JobServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -754,7 +803,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -786,20 +835,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = JobServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -892,6 +935,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -980,11 +1050,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = JobServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + JobServiceClient._read_environment_variables() + ) self._client_cert_source = JobServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -1093,21 +1161,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.JobServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.JobService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.JobService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.JobService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.JobService", + "credentialsType": None, + } + ), ) def create_custom_job( @@ -1194,7 +1266,10 @@ def sample_create_custom_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, custom_job]) + flattened_params = [parent, custom_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1307,7 +1382,10 @@ def sample_get_custom_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1417,7 +1495,10 @@ def sample_list_custom_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1548,7 +1629,10 @@ def sample_delete_custom_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1664,7 +1748,10 @@ def sample_cancel_custom_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1784,7 +1871,10 @@ def sample_create_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) + flattened_params = [parent, data_labeling_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1892,7 +1982,10 @@ def sample_get_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2001,7 +2094,10 @@ def sample_list_data_labeling_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2132,7 +2228,10 @@ def sample_delete_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2237,7 +2336,10 @@ def sample_cancel_data_labeling_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2366,7 +2468,10 @@ def sample_create_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, hyperparameter_tuning_job]) + flattened_params = [parent, hyperparameter_tuning_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2480,7 +2585,10 @@ def sample_get_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2594,7 +2702,10 @@ def sample_list_hyperparameter_tuning_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2729,7 +2840,10 @@ def sample_delete_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2851,7 +2965,10 @@ def sample_cancel_hyperparameter_tuning_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2969,7 +3086,10 @@ def sample_create_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, nas_job]) + flattened_params = [parent, nas_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3076,7 +3196,10 @@ def sample_get_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3186,7 +3309,10 @@ def sample_list_nas_jobs(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3317,7 +3443,10 @@ def sample_delete_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3433,7 +3562,10 @@ def sample_cancel_nas_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3538,7 +3670,10 @@ def sample_get_nas_trial_detail(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3647,7 +3782,10 @@ def sample_list_nas_trial_details(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3790,7 +3928,10 @@ def sample_create_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, batch_prediction_job]) + flattened_params = [parent, batch_prediction_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3905,7 +4046,10 @@ def sample_get_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4017,7 +4161,10 @@ def sample_list_batch_prediction_jobs(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4153,7 +4300,10 @@ def sample_delete_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4273,7 +4423,10 @@ def sample_cancel_batch_prediction_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4399,7 +4552,10 @@ def sample_create_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, model_deployment_monitoring_job]) + flattened_params = [parent, model_deployment_monitoring_job] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4531,7 +4687,10 @@ def sample_search_model_deployment_monitoring_stats_anomalies(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) + flattened_params = [model_deployment_monitoring_job, deployed_model_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4670,7 +4829,10 @@ def sample_get_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4784,7 +4946,10 @@ def sample_list_model_deployment_monitoring_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4918,18 +5083,17 @@ def sample_update_model_deployment_monitoring_job(): Updatable fields: - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` + - ``display_name`` + - ``model_deployment_monitoring_schedule_config`` + - ``model_monitoring_alert_config`` + - ``logging_sampling_strategy`` + - ``labels`` + - ``log_ttl`` + - ``enable_monitoring_pipeline_logs`` . and + - ``model_deployment_monitoring_objective_configs`` . or + - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` + - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` + - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -4954,7 +5118,10 @@ def sample_update_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + flattened_params = [model_deployment_monitoring_job, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -5099,7 +5266,10 @@ def sample_delete_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -5213,7 +5383,10 @@ def sample_pause_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -5315,7 +5488,10 @@ def sample_resume_model_deployment_monitoring_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -5413,16 +5589,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -5468,16 +5648,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -5640,16 +5824,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -5761,16 +5949,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -5883,16 +6075,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -5943,16 +6139,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -5998,16 +6198,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -6053,21 +6257,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("JobServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/job_service/pagers.py b/google/cloud/aiplatform_v1/services/job_service/pagers.py index cf28266666..30024d320a 100644 --- a/google/cloud/aiplatform_v1/services/job_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/job_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py index 0a55fab885..9c065db1e6 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/google/cloud/aiplatform_v1/services/job_service/transports/base.py index 30fc2b073a..bafd938625 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import batch_prediction_job from google.cloud.aiplatform_v1.types import ( @@ -50,12 +51,15 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class JobServiceTransport(abc.ABC): """Abstract transport class for JobService.""" @@ -90,9 +94,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -105,8 +110,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -121,11 +124,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -784,13 +792,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py index 6575c13a51..2149526426 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -54,7 +54,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import JobServiceTransport, DEFAULT_CLIENT_INFO try: @@ -94,12 +94,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.JobService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -176,9 +175,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -312,9 +312,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. 
A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -669,12 +670,12 @@ def create_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "create_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob", - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + self._stubs["create_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob", + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) ) return self._stubs["create_hyperparameter_tuning_job"] @@ -700,12 +701,12 @@ def get_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "get_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob", - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + self._stubs["get_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob", + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) ) return self._stubs["get_hyperparameter_tuning_job"] @@ -732,12 +733,12 @@ def list_hyperparameter_tuning_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_hyperparameter_tuning_jobs" not in self._stubs: - self._stubs[ - "list_hyperparameter_tuning_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs", - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + self._stubs["list_hyperparameter_tuning_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs", + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) ) return self._stubs["list_hyperparameter_tuning_jobs"] @@ -763,12 +764,12 @@ def delete_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "delete_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob", - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob", + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_hyperparameter_tuning_job"] @@ -805,12 +806,12 @@ def cancel_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "cancel_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob", - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["cancel_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob", + request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["cancel_hyperparameter_tuning_job"] @@ -1032,12 +1033,12 @@ def create_batch_prediction_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_batch_prediction_job" not in self._stubs: - self._stubs[ - "create_batch_prediction_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob", - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + self._stubs["create_batch_prediction_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob", + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) ) return self._stubs["create_batch_prediction_job"] @@ -1092,12 +1093,12 @@ def list_batch_prediction_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_batch_prediction_jobs" not in self._stubs: - self._stubs[ - "list_batch_prediction_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs", - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + self._stubs["list_batch_prediction_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs", + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) ) return self._stubs["list_batch_prediction_jobs"] @@ -1123,12 +1124,12 @@ def delete_batch_prediction_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_batch_prediction_job" not in self._stubs: - self._stubs[ - "delete_batch_prediction_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob", - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_batch_prediction_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob", + request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_batch_prediction_job"] @@ -1162,12 +1163,12 @@ def cancel_batch_prediction_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_batch_prediction_job" not in self._stubs: - self._stubs[ - "cancel_batch_prediction_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob", - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["cancel_batch_prediction_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob", + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["cancel_batch_prediction_job"] @@ -1195,12 +1196,12 @@ def create_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "create_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob", - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + self._stubs["create_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob", + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) ) return self._stubs["create_model_deployment_monitoring_job"] @@ -1228,12 +1229,12 @@ def search_model_deployment_monitoring_stats_anomalies( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "search_model_deployment_monitoring_stats_anomalies" not in self._stubs: - self._stubs[ - "search_model_deployment_monitoring_stats_anomalies" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies", - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + self._stubs["search_model_deployment_monitoring_stats_anomalies"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies", + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) ) return self._stubs["search_model_deployment_monitoring_stats_anomalies"] @@ -1260,12 +1261,12 @@ def get_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "get_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob", - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + self._stubs["get_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob", + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) ) return self._stubs["get_model_deployment_monitoring_job"] @@ -1292,12 +1293,12 @@ def list_model_deployment_monitoring_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_model_deployment_monitoring_jobs" not in self._stubs: - self._stubs[ - "list_model_deployment_monitoring_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs", - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + self._stubs["list_model_deployment_monitoring_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs", + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) ) return self._stubs["list_model_deployment_monitoring_jobs"] @@ -1324,12 +1325,12 @@ def update_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "update_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob", - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob", + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_model_deployment_monitoring_job"] @@ -1356,12 +1357,12 @@ def delete_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "delete_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob", - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob", + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_model_deployment_monitoring_job"] @@ -1390,12 +1391,12 @@ def pause_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "pause_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "pause_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob", - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["pause_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob", + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["pause_model_deployment_monitoring_job"] @@ -1423,12 +1424,12 @@ def resume_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "resume_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "resume_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob", - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["resume_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob", + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["resume_model_deployment_monitoring_job"] diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py index ff8edbe28d..8d19d6ac71 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ 
b/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -57,7 +57,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import JobServiceTransport, DEFAULT_CLIENT_INFO from .grpc import JobServiceGrpcTransport @@ -172,8 +172,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -224,9 +225,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -686,12 +688,12 @@ def create_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "create_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob", - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + self._stubs["create_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob", + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) ) return self._stubs["create_hyperparameter_tuning_job"] @@ -717,12 +719,12 @@ def get_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "get_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob", - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + self._stubs["get_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob", + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) ) return self._stubs["get_hyperparameter_tuning_job"] @@ -749,12 +751,12 @@ def list_hyperparameter_tuning_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_hyperparameter_tuning_jobs" not in self._stubs: - self._stubs[ - "list_hyperparameter_tuning_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs", - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + self._stubs["list_hyperparameter_tuning_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs", + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) ) return self._stubs["list_hyperparameter_tuning_jobs"] @@ -781,12 +783,12 @@ def delete_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "delete_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob", - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob", + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_hyperparameter_tuning_job"] @@ -825,12 +827,12 @@ def cancel_hyperparameter_tuning_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_hyperparameter_tuning_job" not in self._stubs: - self._stubs[ - "cancel_hyperparameter_tuning_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob", - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["cancel_hyperparameter_tuning_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob", + request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["cancel_hyperparameter_tuning_job"] @@ -1060,12 +1062,12 @@ def create_batch_prediction_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_batch_prediction_job" not in self._stubs: - self._stubs[ - "create_batch_prediction_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob", - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + self._stubs["create_batch_prediction_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob", + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) ) return self._stubs["create_batch_prediction_job"] @@ -1120,12 +1122,12 @@ def list_batch_prediction_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_batch_prediction_jobs" not in self._stubs: - self._stubs[ - "list_batch_prediction_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs", - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + self._stubs["list_batch_prediction_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs", + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) ) return self._stubs["list_batch_prediction_jobs"] @@ -1152,12 +1154,12 @@ def delete_batch_prediction_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_batch_prediction_job" not in self._stubs: - self._stubs[ - "delete_batch_prediction_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob", - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_batch_prediction_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob", + request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_batch_prediction_job"] @@ -1193,12 +1195,12 @@ def cancel_batch_prediction_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_batch_prediction_job" not in self._stubs: - self._stubs[ - "cancel_batch_prediction_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob", - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["cancel_batch_prediction_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob", + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["cancel_batch_prediction_job"] @@ -1226,12 +1228,12 @@ def create_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "create_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob", - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + self._stubs["create_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob", + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) ) return self._stubs["create_model_deployment_monitoring_job"] @@ -1259,12 +1261,12 @@ def search_model_deployment_monitoring_stats_anomalies( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "search_model_deployment_monitoring_stats_anomalies" not in self._stubs: - self._stubs[ - "search_model_deployment_monitoring_stats_anomalies" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies", - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + self._stubs["search_model_deployment_monitoring_stats_anomalies"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies", + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) ) return self._stubs["search_model_deployment_monitoring_stats_anomalies"] @@ -1291,12 +1293,12 @@ def get_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "get_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob", - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + self._stubs["get_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob", + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) ) return self._stubs["get_model_deployment_monitoring_job"] @@ -1323,12 +1325,12 @@ def list_model_deployment_monitoring_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_model_deployment_monitoring_jobs" not in self._stubs: - self._stubs[ - "list_model_deployment_monitoring_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs", - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + self._stubs["list_model_deployment_monitoring_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs", + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) ) return self._stubs["list_model_deployment_monitoring_jobs"] @@ -1355,12 +1357,12 @@ def update_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "update_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob", - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob", + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_model_deployment_monitoring_job"] @@ -1387,12 +1389,12 @@ def delete_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "delete_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob", - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob", + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_model_deployment_monitoring_job"] @@ -1422,12 +1424,12 @@ def pause_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "pause_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "pause_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob", - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["pause_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob", + request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["pause_model_deployment_monitoring_job"] @@ -1456,12 +1458,12 @@ def resume_model_deployment_monitoring_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "resume_model_deployment_monitoring_job" not in self._stubs: - self._stubs[ - "resume_model_deployment_monitoring_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob", - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + self._stubs["resume_model_deployment_monitoring_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob", + request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) ) return self._stubs["resume_model_deployment_monitoring_job"] diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py index 12afe3778f..02929c3118 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py +++ 
b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -55,7 +56,7 @@ ) from google.cloud.aiplatform_v1.types import nas_job from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -82,6 +83,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class JobServiceRestInterceptor: """Interceptor for JobService. @@ -449,12 +453,38 @@ def post_create_batch_prediction_job( ) -> gca_batch_prediction_job.BatchPredictionJob: """Post-rpc interceptor for create_batch_prediction_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_batch_prediction_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_batch_prediction_job` interceptor runs + before the `post_create_batch_prediction_job_with_metadata` interceptor. 
""" return response + def post_create_batch_prediction_job_with_metadata( + self, + response: gca_batch_prediction_job.BatchPredictionJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_batch_prediction_job.BatchPredictionJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for create_batch_prediction_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_batch_prediction_job_with_metadata` + interceptor in new development instead of the `post_create_batch_prediction_job` interceptor. + When both interceptors are used, this `post_create_batch_prediction_job_with_metadata` interceptor runs after the + `post_create_batch_prediction_job` interceptor. The (possibly modified) response returned by + `post_create_batch_prediction_job` will be passed to + `post_create_batch_prediction_job_with_metadata`. + """ + return response, metadata + def pre_create_custom_job( self, request: job_service.CreateCustomJobRequest, @@ -474,12 +504,35 @@ def post_create_custom_job( ) -> gca_custom_job.CustomJob: """Post-rpc interceptor for create_custom_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_custom_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_custom_job` interceptor runs + before the `post_create_custom_job_with_metadata` interceptor. 
""" return response + def post_create_custom_job_with_metadata( + self, + response: gca_custom_job.CustomJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_custom_job.CustomJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_custom_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_custom_job_with_metadata` + interceptor in new development instead of the `post_create_custom_job` interceptor. + When both interceptors are used, this `post_create_custom_job_with_metadata` interceptor runs after the + `post_create_custom_job` interceptor. The (possibly modified) response returned by + `post_create_custom_job` will be passed to + `post_create_custom_job_with_metadata`. + """ + return response, metadata + def pre_create_data_labeling_job( self, request: job_service.CreateDataLabelingJobRequest, @@ -500,12 +553,37 @@ def post_create_data_labeling_job( ) -> gca_data_labeling_job.DataLabelingJob: """Post-rpc interceptor for create_data_labeling_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_data_labeling_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_data_labeling_job` interceptor runs + before the `post_create_data_labeling_job_with_metadata` interceptor. 
""" return response + def post_create_data_labeling_job_with_metadata( + self, + response: gca_data_labeling_job.DataLabelingJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_data_labeling_job.DataLabelingJob, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_data_labeling_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_data_labeling_job_with_metadata` + interceptor in new development instead of the `post_create_data_labeling_job` interceptor. + When both interceptors are used, this `post_create_data_labeling_job_with_metadata` interceptor runs after the + `post_create_data_labeling_job` interceptor. The (possibly modified) response returned by + `post_create_data_labeling_job` will be passed to + `post_create_data_labeling_job_with_metadata`. + """ + return response, metadata + def pre_create_hyperparameter_tuning_job( self, request: job_service.CreateHyperparameterTuningJobRequest, @@ -526,12 +604,38 @@ def post_create_hyperparameter_tuning_job( ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: """Post-rpc interceptor for create_hyperparameter_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_hyperparameter_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_hyperparameter_tuning_job` interceptor runs + before the `post_create_hyperparameter_tuning_job_with_metadata` interceptor. 
""" return response + def post_create_hyperparameter_tuning_job_with_metadata( + self, + response: gca_hyperparameter_tuning_job.HyperparameterTuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_hyperparameter_tuning_job.HyperparameterTuningJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for create_hyperparameter_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_hyperparameter_tuning_job_with_metadata` + interceptor in new development instead of the `post_create_hyperparameter_tuning_job` interceptor. + When both interceptors are used, this `post_create_hyperparameter_tuning_job_with_metadata` interceptor runs after the + `post_create_hyperparameter_tuning_job` interceptor. The (possibly modified) response returned by + `post_create_hyperparameter_tuning_job` will be passed to + `post_create_hyperparameter_tuning_job_with_metadata`. + """ + return response, metadata + def pre_create_model_deployment_monitoring_job( self, request: job_service.CreateModelDeploymentMonitoringJobRequest, @@ -552,12 +656,38 @@ def post_create_model_deployment_monitoring_job( ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: """Post-rpc interceptor for create_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_model_deployment_monitoring_job` interceptor runs + before the `post_create_model_deployment_monitoring_job_with_metadata` interceptor. 
""" return response + def post_create_model_deployment_monitoring_job_with_metadata( + self, + response: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for create_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_create_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_create_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_create_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_create_model_deployment_monitoring_job` will be passed to + `post_create_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + def pre_create_nas_job( self, request: job_service.CreateNasJobRequest, @@ -575,12 +705,35 @@ def pre_create_nas_job( def post_create_nas_job(self, response: gca_nas_job.NasJob) -> gca_nas_job.NasJob: """Post-rpc interceptor for create_nas_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_nas_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_nas_job` interceptor runs + before the `post_create_nas_job_with_metadata` interceptor. 
""" return response + def post_create_nas_job_with_metadata( + self, + response: gca_nas_job.NasJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_nas_job.NasJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_nas_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_nas_job_with_metadata` + interceptor in new development instead of the `post_create_nas_job` interceptor. + When both interceptors are used, this `post_create_nas_job_with_metadata` interceptor runs after the + `post_create_nas_job` interceptor. The (possibly modified) response returned by + `post_create_nas_job` will be passed to + `post_create_nas_job_with_metadata`. + """ + return response, metadata + def pre_delete_batch_prediction_job( self, request: job_service.DeleteBatchPredictionJobRequest, @@ -601,12 +754,35 @@ def post_delete_batch_prediction_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_batch_prediction_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_batch_prediction_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_batch_prediction_job` interceptor runs + before the `post_delete_batch_prediction_job_with_metadata` interceptor. 
""" return response + def post_delete_batch_prediction_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_batch_prediction_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_batch_prediction_job_with_metadata` + interceptor in new development instead of the `post_delete_batch_prediction_job` interceptor. + When both interceptors are used, this `post_delete_batch_prediction_job_with_metadata` interceptor runs after the + `post_delete_batch_prediction_job` interceptor. The (possibly modified) response returned by + `post_delete_batch_prediction_job` will be passed to + `post_delete_batch_prediction_job_with_metadata`. + """ + return response, metadata + def pre_delete_custom_job( self, request: job_service.DeleteCustomJobRequest, @@ -626,12 +802,35 @@ def post_delete_custom_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_custom_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_custom_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_custom_job` interceptor runs + before the `post_delete_custom_job_with_metadata` interceptor. 
""" return response + def post_delete_custom_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_custom_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_custom_job_with_metadata` + interceptor in new development instead of the `post_delete_custom_job` interceptor. + When both interceptors are used, this `post_delete_custom_job_with_metadata` interceptor runs after the + `post_delete_custom_job` interceptor. The (possibly modified) response returned by + `post_delete_custom_job` will be passed to + `post_delete_custom_job_with_metadata`. + """ + return response, metadata + def pre_delete_data_labeling_job( self, request: job_service.DeleteDataLabelingJobRequest, @@ -652,12 +851,35 @@ def post_delete_data_labeling_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_data_labeling_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_data_labeling_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_data_labeling_job` interceptor runs + before the `post_delete_data_labeling_job_with_metadata` interceptor. 
""" return response + def post_delete_data_labeling_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_data_labeling_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_data_labeling_job_with_metadata` + interceptor in new development instead of the `post_delete_data_labeling_job` interceptor. + When both interceptors are used, this `post_delete_data_labeling_job_with_metadata` interceptor runs after the + `post_delete_data_labeling_job` interceptor. The (possibly modified) response returned by + `post_delete_data_labeling_job` will be passed to + `post_delete_data_labeling_job_with_metadata`. + """ + return response, metadata + def pre_delete_hyperparameter_tuning_job( self, request: job_service.DeleteHyperparameterTuningJobRequest, @@ -678,12 +900,35 @@ def post_delete_hyperparameter_tuning_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_hyperparameter_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_hyperparameter_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_hyperparameter_tuning_job` interceptor runs + before the `post_delete_hyperparameter_tuning_job_with_metadata` interceptor. 
""" return response + def post_delete_hyperparameter_tuning_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_hyperparameter_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_hyperparameter_tuning_job_with_metadata` + interceptor in new development instead of the `post_delete_hyperparameter_tuning_job` interceptor. + When both interceptors are used, this `post_delete_hyperparameter_tuning_job_with_metadata` interceptor runs after the + `post_delete_hyperparameter_tuning_job` interceptor. The (possibly modified) response returned by + `post_delete_hyperparameter_tuning_job` will be passed to + `post_delete_hyperparameter_tuning_job_with_metadata`. + """ + return response, metadata + def pre_delete_model_deployment_monitoring_job( self, request: job_service.DeleteModelDeploymentMonitoringJobRequest, @@ -704,12 +949,35 @@ def post_delete_model_deployment_monitoring_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_model_deployment_monitoring_job` interceptor runs + before the `post_delete_model_deployment_monitoring_job_with_metadata` interceptor. 
""" return response + def post_delete_model_deployment_monitoring_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_delete_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_delete_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_delete_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_delete_model_deployment_monitoring_job` will be passed to + `post_delete_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + def pre_delete_nas_job( self, request: job_service.DeleteNasJobRequest, @@ -729,12 +997,35 @@ def post_delete_nas_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_nas_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_nas_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_nas_job` interceptor runs + before the `post_delete_nas_job_with_metadata` interceptor. 
""" return response + def post_delete_nas_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_nas_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_nas_job_with_metadata` + interceptor in new development instead of the `post_delete_nas_job` interceptor. + When both interceptors are used, this `post_delete_nas_job_with_metadata` interceptor runs after the + `post_delete_nas_job` interceptor. The (possibly modified) response returned by + `post_delete_nas_job` will be passed to + `post_delete_nas_job_with_metadata`. + """ + return response, metadata + def pre_get_batch_prediction_job( self, request: job_service.GetBatchPredictionJobRequest, @@ -755,12 +1046,37 @@ def post_get_batch_prediction_job( ) -> batch_prediction_job.BatchPredictionJob: """Post-rpc interceptor for get_batch_prediction_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_batch_prediction_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_batch_prediction_job` interceptor runs + before the `post_get_batch_prediction_job_with_metadata` interceptor. 
""" return response + def post_get_batch_prediction_job_with_metadata( + self, + response: batch_prediction_job.BatchPredictionJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + batch_prediction_job.BatchPredictionJob, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_batch_prediction_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_batch_prediction_job_with_metadata` + interceptor in new development instead of the `post_get_batch_prediction_job` interceptor. + When both interceptors are used, this `post_get_batch_prediction_job_with_metadata` interceptor runs after the + `post_get_batch_prediction_job` interceptor. The (possibly modified) response returned by + `post_get_batch_prediction_job` will be passed to + `post_get_batch_prediction_job_with_metadata`. + """ + return response, metadata + def pre_get_custom_job( self, request: job_service.GetCustomJobRequest, @@ -780,12 +1096,35 @@ def post_get_custom_job( ) -> custom_job.CustomJob: """Post-rpc interceptor for get_custom_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_custom_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_custom_job` interceptor runs + before the `post_get_custom_job_with_metadata` interceptor. 
""" return response + def post_get_custom_job_with_metadata( + self, + response: custom_job.CustomJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[custom_job.CustomJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_custom_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_custom_job_with_metadata` + interceptor in new development instead of the `post_get_custom_job` interceptor. + When both interceptors are used, this `post_get_custom_job_with_metadata` interceptor runs after the + `post_get_custom_job` interceptor. The (possibly modified) response returned by + `post_get_custom_job` will be passed to + `post_get_custom_job_with_metadata`. + """ + return response, metadata + def pre_get_data_labeling_job( self, request: job_service.GetDataLabelingJobRequest, @@ -805,12 +1144,37 @@ def post_get_data_labeling_job( ) -> data_labeling_job.DataLabelingJob: """Post-rpc interceptor for get_data_labeling_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_data_labeling_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_data_labeling_job` interceptor runs + before the `post_get_data_labeling_job_with_metadata` interceptor. 
""" return response + def post_get_data_labeling_job_with_metadata( + self, + response: data_labeling_job.DataLabelingJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + data_labeling_job.DataLabelingJob, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_data_labeling_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_data_labeling_job_with_metadata` + interceptor in new development instead of the `post_get_data_labeling_job` interceptor. + When both interceptors are used, this `post_get_data_labeling_job_with_metadata` interceptor runs after the + `post_get_data_labeling_job` interceptor. The (possibly modified) response returned by + `post_get_data_labeling_job` will be passed to + `post_get_data_labeling_job_with_metadata`. + """ + return response, metadata + def pre_get_hyperparameter_tuning_job( self, request: job_service.GetHyperparameterTuningJobRequest, @@ -831,12 +1195,38 @@ def post_get_hyperparameter_tuning_job( ) -> hyperparameter_tuning_job.HyperparameterTuningJob: """Post-rpc interceptor for get_hyperparameter_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_hyperparameter_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_hyperparameter_tuning_job` interceptor runs + before the `post_get_hyperparameter_tuning_job_with_metadata` interceptor. 
""" return response + def post_get_hyperparameter_tuning_job_with_metadata( + self, + response: hyperparameter_tuning_job.HyperparameterTuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hyperparameter_tuning_job.HyperparameterTuningJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_hyperparameter_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_hyperparameter_tuning_job_with_metadata` + interceptor in new development instead of the `post_get_hyperparameter_tuning_job` interceptor. + When both interceptors are used, this `post_get_hyperparameter_tuning_job_with_metadata` interceptor runs after the + `post_get_hyperparameter_tuning_job` interceptor. The (possibly modified) response returned by + `post_get_hyperparameter_tuning_job` will be passed to + `post_get_hyperparameter_tuning_job_with_metadata`. + """ + return response, metadata + def pre_get_model_deployment_monitoring_job( self, request: job_service.GetModelDeploymentMonitoringJobRequest, @@ -857,12 +1247,38 @@ def post_get_model_deployment_monitoring_job( ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: """Post-rpc interceptor for get_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_model_deployment_monitoring_job` interceptor runs + before the `post_get_model_deployment_monitoring_job_with_metadata` interceptor. 
""" return response + def post_get_model_deployment_monitoring_job_with_metadata( + self, + response: model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_get_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_get_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_get_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_get_model_deployment_monitoring_job` will be passed to + `post_get_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + def pre_get_nas_job( self, request: job_service.GetNasJobRequest, @@ -878,12 +1294,35 @@ def pre_get_nas_job( def post_get_nas_job(self, response: nas_job.NasJob) -> nas_job.NasJob: """Post-rpc interceptor for get_nas_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_nas_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_nas_job` interceptor runs + before the `post_get_nas_job_with_metadata` interceptor. 
""" return response + def post_get_nas_job_with_metadata( + self, + response: nas_job.NasJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[nas_job.NasJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_nas_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_nas_job_with_metadata` + interceptor in new development instead of the `post_get_nas_job` interceptor. + When both interceptors are used, this `post_get_nas_job_with_metadata` interceptor runs after the + `post_get_nas_job` interceptor. The (possibly modified) response returned by + `post_get_nas_job` will be passed to + `post_get_nas_job_with_metadata`. + """ + return response, metadata + def pre_get_nas_trial_detail( self, request: job_service.GetNasTrialDetailRequest, @@ -903,12 +1342,35 @@ def post_get_nas_trial_detail( ) -> nas_job.NasTrialDetail: """Post-rpc interceptor for get_nas_trial_detail - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_nas_trial_detail_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_nas_trial_detail` interceptor runs + before the `post_get_nas_trial_detail_with_metadata` interceptor. """ return response + def post_get_nas_trial_detail_with_metadata( + self, + response: nas_job.NasTrialDetail, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[nas_job.NasTrialDetail, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_nas_trial_detail + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. 
+ + We recommend only using this `post_get_nas_trial_detail_with_metadata` + interceptor in new development instead of the `post_get_nas_trial_detail` interceptor. + When both interceptors are used, this `post_get_nas_trial_detail_with_metadata` interceptor runs after the + `post_get_nas_trial_detail` interceptor. The (possibly modified) response returned by + `post_get_nas_trial_detail` will be passed to + `post_get_nas_trial_detail_with_metadata`. + """ + return response, metadata + def pre_list_batch_prediction_jobs( self, request: job_service.ListBatchPredictionJobsRequest, @@ -929,12 +1391,38 @@ def post_list_batch_prediction_jobs( ) -> job_service.ListBatchPredictionJobsResponse: """Post-rpc interceptor for list_batch_prediction_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_batch_prediction_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_batch_prediction_jobs` interceptor runs + before the `post_list_batch_prediction_jobs_with_metadata` interceptor. """ return response + def post_list_batch_prediction_jobs_with_metadata( + self, + response: job_service.ListBatchPredictionJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListBatchPredictionJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_batch_prediction_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_batch_prediction_jobs_with_metadata` + interceptor in new development instead of the `post_list_batch_prediction_jobs` interceptor. 
+ When both interceptors are used, this `post_list_batch_prediction_jobs_with_metadata` interceptor runs after the + `post_list_batch_prediction_jobs` interceptor. The (possibly modified) response returned by + `post_list_batch_prediction_jobs` will be passed to + `post_list_batch_prediction_jobs_with_metadata`. + """ + return response, metadata + def pre_list_custom_jobs( self, request: job_service.ListCustomJobsRequest, @@ -954,12 +1442,37 @@ def post_list_custom_jobs( ) -> job_service.ListCustomJobsResponse: """Post-rpc interceptor for list_custom_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_custom_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_custom_jobs` interceptor runs + before the `post_list_custom_jobs_with_metadata` interceptor. """ return response + def post_list_custom_jobs_with_metadata( + self, + response: job_service.ListCustomJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListCustomJobsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_custom_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_custom_jobs_with_metadata` + interceptor in new development instead of the `post_list_custom_jobs` interceptor. + When both interceptors are used, this `post_list_custom_jobs_with_metadata` interceptor runs after the + `post_list_custom_jobs` interceptor. The (possibly modified) response returned by + `post_list_custom_jobs` will be passed to + `post_list_custom_jobs_with_metadata`. 
+ """ + return response, metadata + def pre_list_data_labeling_jobs( self, request: job_service.ListDataLabelingJobsRequest, @@ -979,12 +1492,38 @@ def post_list_data_labeling_jobs( ) -> job_service.ListDataLabelingJobsResponse: """Post-rpc interceptor for list_data_labeling_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_data_labeling_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_data_labeling_jobs` interceptor runs + before the `post_list_data_labeling_jobs_with_metadata` interceptor. """ return response + def post_list_data_labeling_jobs_with_metadata( + self, + response: job_service.ListDataLabelingJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListDataLabelingJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_data_labeling_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_data_labeling_jobs_with_metadata` + interceptor in new development instead of the `post_list_data_labeling_jobs` interceptor. + When both interceptors are used, this `post_list_data_labeling_jobs_with_metadata` interceptor runs after the + `post_list_data_labeling_jobs` interceptor. The (possibly modified) response returned by + `post_list_data_labeling_jobs` will be passed to + `post_list_data_labeling_jobs_with_metadata`. 
+ """ + return response, metadata + def pre_list_hyperparameter_tuning_jobs( self, request: job_service.ListHyperparameterTuningJobsRequest, @@ -1005,11 +1544,37 @@ def post_list_hyperparameter_tuning_jobs( ) -> job_service.ListHyperparameterTuningJobsResponse: """Post-rpc interceptor for list_hyperparameter_tuning_jobs - Override in a subclass to manipulate the response - after it is returned by the JobService server but before - it is returned to user code. + DEPRECATED. Please use the `post_list_hyperparameter_tuning_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the JobService server but before + it is returned to user code. This `post_list_hyperparameter_tuning_jobs` interceptor runs + before the `post_list_hyperparameter_tuning_jobs_with_metadata` interceptor. + """ + return response + + def post_list_hyperparameter_tuning_jobs_with_metadata( + self, + response: job_service.ListHyperparameterTuningJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListHyperparameterTuningJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_hyperparameter_tuning_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_hyperparameter_tuning_jobs_with_metadata` + interceptor in new development instead of the `post_list_hyperparameter_tuning_jobs` interceptor. + When both interceptors are used, this `post_list_hyperparameter_tuning_jobs_with_metadata` interceptor runs after the + `post_list_hyperparameter_tuning_jobs` interceptor. The (possibly modified) response returned by + `post_list_hyperparameter_tuning_jobs` will be passed to + `post_list_hyperparameter_tuning_jobs_with_metadata`. 
""" - return response + return response, metadata def pre_list_model_deployment_monitoring_jobs( self, @@ -1031,12 +1596,38 @@ def post_list_model_deployment_monitoring_jobs( ) -> job_service.ListModelDeploymentMonitoringJobsResponse: """Post-rpc interceptor for list_model_deployment_monitoring_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_deployment_monitoring_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_deployment_monitoring_jobs` interceptor runs + before the `post_list_model_deployment_monitoring_jobs_with_metadata` interceptor. """ return response + def post_list_model_deployment_monitoring_jobs_with_metadata( + self, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListModelDeploymentMonitoringJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_deployment_monitoring_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_model_deployment_monitoring_jobs_with_metadata` + interceptor in new development instead of the `post_list_model_deployment_monitoring_jobs` interceptor. + When both interceptors are used, this `post_list_model_deployment_monitoring_jobs_with_metadata` interceptor runs after the + `post_list_model_deployment_monitoring_jobs` interceptor. The (possibly modified) response returned by + `post_list_model_deployment_monitoring_jobs` will be passed to + `post_list_model_deployment_monitoring_jobs_with_metadata`. 
+ """ + return response, metadata + def pre_list_nas_jobs( self, request: job_service.ListNasJobsRequest, @@ -1054,12 +1645,37 @@ def post_list_nas_jobs( ) -> job_service.ListNasJobsResponse: """Post-rpc interceptor for list_nas_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_nas_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_nas_jobs` interceptor runs + before the `post_list_nas_jobs_with_metadata` interceptor. """ return response + def post_list_nas_jobs_with_metadata( + self, + response: job_service.ListNasJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListNasJobsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_nas_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_nas_jobs_with_metadata` + interceptor in new development instead of the `post_list_nas_jobs` interceptor. + When both interceptors are used, this `post_list_nas_jobs_with_metadata` interceptor runs after the + `post_list_nas_jobs` interceptor. The (possibly modified) response returned by + `post_list_nas_jobs` will be passed to + `post_list_nas_jobs_with_metadata`. + """ + return response, metadata + def pre_list_nas_trial_details( self, request: job_service.ListNasTrialDetailsRequest, @@ -1079,12 +1695,37 @@ def post_list_nas_trial_details( ) -> job_service.ListNasTrialDetailsResponse: """Post-rpc interceptor for list_nas_trial_details - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_nas_trial_details_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_nas_trial_details` interceptor runs + before the `post_list_nas_trial_details_with_metadata` interceptor. """ return response + def post_list_nas_trial_details_with_metadata( + self, + response: job_service.ListNasTrialDetailsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListNasTrialDetailsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_nas_trial_details + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_nas_trial_details_with_metadata` + interceptor in new development instead of the `post_list_nas_trial_details` interceptor. + When both interceptors are used, this `post_list_nas_trial_details_with_metadata` interceptor runs after the + `post_list_nas_trial_details` interceptor. The (possibly modified) response returned by + `post_list_nas_trial_details` will be passed to + `post_list_nas_trial_details_with_metadata`. + """ + return response, metadata + def pre_pause_model_deployment_monitoring_job( self, request: job_service.PauseModelDeploymentMonitoringJobRequest, @@ -1136,12 +1777,38 @@ def post_search_model_deployment_monitoring_stats_anomalies( ) -> job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse: """Post-rpc interceptor for search_model_deployment_monitoring_stats_anomalies - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. 
+ it is returned to user code. This `post_search_model_deployment_monitoring_stats_anomalies` interceptor runs + before the `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` interceptor. """ return response + def post_search_model_deployment_monitoring_stats_anomalies_with_metadata( + self, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_model_deployment_monitoring_stats_anomalies + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` + interceptor in new development instead of the `post_search_model_deployment_monitoring_stats_anomalies` interceptor. + When both interceptors are used, this `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` interceptor runs after the + `post_search_model_deployment_monitoring_stats_anomalies` interceptor. The (possibly modified) response returned by + `post_search_model_deployment_monitoring_stats_anomalies` will be passed to + `post_search_model_deployment_monitoring_stats_anomalies_with_metadata`. + """ + return response, metadata + def pre_update_model_deployment_monitoring_job( self, request: job_service.UpdateModelDeploymentMonitoringJobRequest, @@ -1162,12 +1829,35 @@ def post_update_model_deployment_monitoring_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_model_deployment_monitoring_job_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_update_model_deployment_monitoring_job` interceptor runs + before the `post_update_model_deployment_monitoring_job_with_metadata` interceptor. """ return response + def post_update_model_deployment_monitoring_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_update_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_update_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_update_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_update_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_update_model_deployment_monitoring_job` will be passed to + `post_update_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1457,9 +2147,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. 
This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1670,6 +2361,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1806,6 +2501,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1814,6 +2513,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1854,6 +2561,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2016,6 
+2727,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2044,6 +2759,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2156,6 +2875,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2164,6 +2887,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2224,6 +2955,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2386,6 +3121,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2426,6 +3165,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2538,6 +3281,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2546,6 +3293,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2606,6 +3361,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2764,6 +3523,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2804,6 +3567,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2916,6 +3683,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2936,6 +3711,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2984,6 +3763,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3150,6 +3933,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3182,6 +3969,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3290,6 +4081,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3298,6 +4093,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3358,6 +4161,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3459,7 +4266,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3575,7 +4382,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3691,7 +4498,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3809,7 +4616,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3931,7 +4738,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4101,6 +4908,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_batch_prediction_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_batch_prediction_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4261,6 +5072,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_custom_job(resp) + response_metadata = 
[(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_custom_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4414,6 +5229,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_data_labeling_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_data_labeling_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4574,6 +5393,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_hyperparameter_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_create_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4669,11 +5494,10 @@ def __call__( _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_create_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_create_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -4742,6 +5566,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_model_deployment_monitoring_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_create_model_deployment_monitoring_job_with_metadata( + resp, 
response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4900,6 +5730,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_nas_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_nas_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5007,7 +5841,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5046,6 +5880,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_batch_prediction_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_batch_prediction_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5152,7 +5990,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5191,6 +6029,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_custom_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_custom_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5297,7 +6139,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = 
json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5336,6 +6178,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_data_labeling_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_data_labeling_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5444,7 +6290,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5485,6 +6331,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_hyperparameter_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5573,11 +6425,10 @@ def __call__( _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_delete_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_delete_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -5596,7 +6447,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ 
-5635,6 +6486,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_model_deployment_monitoring_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5743,7 +6600,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5782,6 +6639,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_nas_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_nas_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5933,6 +6794,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_batch_prediction_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_batch_prediction_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6089,6 +6954,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_custom_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_custom_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6236,6 +7105,10 
@@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_data_labeling_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_data_labeling_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6389,6 +7262,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_hyperparameter_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_get_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6483,11 +7362,10 @@ def __call__( _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_get_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_get_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -6551,6 +7429,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_model_deployment_monitoring_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_get_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6699,6 +7583,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_nas_job(resp) + response_metadata 
= [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_nas_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6847,6 +7735,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_nas_trial_detail(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_nas_trial_detail_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6994,6 +7886,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_batch_prediction_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_batch_prediction_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7144,6 +8040,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_custom_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_custom_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7292,6 +8192,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_data_labeling_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_data_labeling_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7444,6 +8348,12 @@ def 
__call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_hyperparameter_tuning_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7535,11 +8445,10 @@ def __call__( _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_list_model_deployment_monitoring_jobs( - request, metadata + request, metadata = ( + self._interceptor.pre_list_model_deployment_monitoring_jobs( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_transcoded_request( http_options, request @@ -7599,6 +8508,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_model_deployment_monitoring_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7751,6 +8666,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_nas_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_nas_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7897,6 +8816,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_nas_trial_details(resp) + 
response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_nas_trial_details_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7981,11 +8904,10 @@ def __call__( _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_pause_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_pause_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -8008,7 +8930,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8102,11 +9024,10 @@ def __call__( _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_resume_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_resume_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -8129,7 +9050,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8231,11 +9152,10 @@ def __call__( _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( - request, metadata + request, 
metadata = ( + self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_transcoded_request( http_options, request @@ -8306,6 +9226,12 @@ def __call__( resp = self._interceptor.post_search_model_deployment_monitoring_stats_anomalies( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -8397,11 +9323,10 @@ def __call__( _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_update_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + self._interceptor.pre_update_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -8424,7 +9349,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -8464,6 +9389,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_model_deployment_monitoring_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_update_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -8863,7 +9794,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, 
Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -9009,7 +9939,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -9155,7 +10084,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -9302,7 +10230,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -9455,7 +10382,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -9599,7 +10525,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -9715,7 +10640,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -9831,7 +10755,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -9977,7 +10900,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -10121,7 +11043,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py index 509ed81bd7..0cc6a79e45 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -69,7 +69,7 @@ ) from google.cloud.aiplatform_v1.types import nas_job from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -100,6 +100,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncJobServiceRestInterceptor: """Asynchronous Interceptor for JobService. @@ -467,12 +470,38 @@ async def post_create_batch_prediction_job( ) -> gca_batch_prediction_job.BatchPredictionJob: """Post-rpc interceptor for create_batch_prediction_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_batch_prediction_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. 
+ it is returned to user code. This `post_create_batch_prediction_job` interceptor runs + before the `post_create_batch_prediction_job_with_metadata` interceptor. """ return response + async def post_create_batch_prediction_job_with_metadata( + self, + response: gca_batch_prediction_job.BatchPredictionJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_batch_prediction_job.BatchPredictionJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for create_batch_prediction_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_batch_prediction_job_with_metadata` + interceptor in new development instead of the `post_create_batch_prediction_job` interceptor. + When both interceptors are used, this `post_create_batch_prediction_job_with_metadata` interceptor runs after the + `post_create_batch_prediction_job` interceptor. The (possibly modified) response returned by + `post_create_batch_prediction_job` will be passed to + `post_create_batch_prediction_job_with_metadata`. + """ + return response, metadata + async def pre_create_custom_job( self, request: job_service.CreateCustomJobRequest, @@ -492,12 +521,35 @@ async def post_create_custom_job( ) -> gca_custom_job.CustomJob: """Post-rpc interceptor for create_custom_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_custom_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_custom_job` interceptor runs + before the `post_create_custom_job_with_metadata` interceptor. 
""" return response + async def post_create_custom_job_with_metadata( + self, + response: gca_custom_job.CustomJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_custom_job.CustomJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_custom_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_custom_job_with_metadata` + interceptor in new development instead of the `post_create_custom_job` interceptor. + When both interceptors are used, this `post_create_custom_job_with_metadata` interceptor runs after the + `post_create_custom_job` interceptor. The (possibly modified) response returned by + `post_create_custom_job` will be passed to + `post_create_custom_job_with_metadata`. + """ + return response, metadata + async def pre_create_data_labeling_job( self, request: job_service.CreateDataLabelingJobRequest, @@ -518,12 +570,37 @@ async def post_create_data_labeling_job( ) -> gca_data_labeling_job.DataLabelingJob: """Post-rpc interceptor for create_data_labeling_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_data_labeling_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_data_labeling_job` interceptor runs + before the `post_create_data_labeling_job_with_metadata` interceptor. 
""" return response + async def post_create_data_labeling_job_with_metadata( + self, + response: gca_data_labeling_job.DataLabelingJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_data_labeling_job.DataLabelingJob, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_data_labeling_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_data_labeling_job_with_metadata` + interceptor in new development instead of the `post_create_data_labeling_job` interceptor. + When both interceptors are used, this `post_create_data_labeling_job_with_metadata` interceptor runs after the + `post_create_data_labeling_job` interceptor. The (possibly modified) response returned by + `post_create_data_labeling_job` will be passed to + `post_create_data_labeling_job_with_metadata`. + """ + return response, metadata + async def pre_create_hyperparameter_tuning_job( self, request: job_service.CreateHyperparameterTuningJobRequest, @@ -544,12 +621,38 @@ async def post_create_hyperparameter_tuning_job( ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: """Post-rpc interceptor for create_hyperparameter_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_hyperparameter_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_hyperparameter_tuning_job` interceptor runs + before the `post_create_hyperparameter_tuning_job_with_metadata` interceptor. 
""" return response + async def post_create_hyperparameter_tuning_job_with_metadata( + self, + response: gca_hyperparameter_tuning_job.HyperparameterTuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_hyperparameter_tuning_job.HyperparameterTuningJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for create_hyperparameter_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_hyperparameter_tuning_job_with_metadata` + interceptor in new development instead of the `post_create_hyperparameter_tuning_job` interceptor. + When both interceptors are used, this `post_create_hyperparameter_tuning_job_with_metadata` interceptor runs after the + `post_create_hyperparameter_tuning_job` interceptor. The (possibly modified) response returned by + `post_create_hyperparameter_tuning_job` will be passed to + `post_create_hyperparameter_tuning_job_with_metadata`. + """ + return response, metadata + async def pre_create_model_deployment_monitoring_job( self, request: job_service.CreateModelDeploymentMonitoringJobRequest, @@ -570,12 +673,38 @@ async def post_create_model_deployment_monitoring_job( ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: """Post-rpc interceptor for create_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_model_deployment_monitoring_job` interceptor runs + before the `post_create_model_deployment_monitoring_job_with_metadata` interceptor. 
""" return response + async def post_create_model_deployment_monitoring_job_with_metadata( + self, + response: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for create_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_create_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_create_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_create_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_create_model_deployment_monitoring_job` will be passed to + `post_create_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + async def pre_create_nas_job( self, request: job_service.CreateNasJobRequest, @@ -595,12 +724,35 @@ async def post_create_nas_job( ) -> gca_nas_job.NasJob: """Post-rpc interceptor for create_nas_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_nas_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_create_nas_job` interceptor runs + before the `post_create_nas_job_with_metadata` interceptor. 
""" return response + async def post_create_nas_job_with_metadata( + self, + response: gca_nas_job.NasJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_nas_job.NasJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_nas_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_create_nas_job_with_metadata` + interceptor in new development instead of the `post_create_nas_job` interceptor. + When both interceptors are used, this `post_create_nas_job_with_metadata` interceptor runs after the + `post_create_nas_job` interceptor. The (possibly modified) response returned by + `post_create_nas_job` will be passed to + `post_create_nas_job_with_metadata`. + """ + return response, metadata + async def pre_delete_batch_prediction_job( self, request: job_service.DeleteBatchPredictionJobRequest, @@ -621,12 +773,35 @@ async def post_delete_batch_prediction_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_batch_prediction_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_batch_prediction_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_batch_prediction_job` interceptor runs + before the `post_delete_batch_prediction_job_with_metadata` interceptor. 
""" return response + async def post_delete_batch_prediction_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_batch_prediction_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_batch_prediction_job_with_metadata` + interceptor in new development instead of the `post_delete_batch_prediction_job` interceptor. + When both interceptors are used, this `post_delete_batch_prediction_job_with_metadata` interceptor runs after the + `post_delete_batch_prediction_job` interceptor. The (possibly modified) response returned by + `post_delete_batch_prediction_job` will be passed to + `post_delete_batch_prediction_job_with_metadata`. + """ + return response, metadata + async def pre_delete_custom_job( self, request: job_service.DeleteCustomJobRequest, @@ -646,12 +821,35 @@ async def post_delete_custom_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_custom_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_custom_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_custom_job` interceptor runs + before the `post_delete_custom_job_with_metadata` interceptor. 
""" return response + async def post_delete_custom_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_custom_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_custom_job_with_metadata` + interceptor in new development instead of the `post_delete_custom_job` interceptor. + When both interceptors are used, this `post_delete_custom_job_with_metadata` interceptor runs after the + `post_delete_custom_job` interceptor. The (possibly modified) response returned by + `post_delete_custom_job` will be passed to + `post_delete_custom_job_with_metadata`. + """ + return response, metadata + async def pre_delete_data_labeling_job( self, request: job_service.DeleteDataLabelingJobRequest, @@ -672,12 +870,35 @@ async def post_delete_data_labeling_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_data_labeling_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_data_labeling_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_data_labeling_job` interceptor runs + before the `post_delete_data_labeling_job_with_metadata` interceptor. 
""" return response + async def post_delete_data_labeling_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_data_labeling_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_data_labeling_job_with_metadata` + interceptor in new development instead of the `post_delete_data_labeling_job` interceptor. + When both interceptors are used, this `post_delete_data_labeling_job_with_metadata` interceptor runs after the + `post_delete_data_labeling_job` interceptor. The (possibly modified) response returned by + `post_delete_data_labeling_job` will be passed to + `post_delete_data_labeling_job_with_metadata`. + """ + return response, metadata + async def pre_delete_hyperparameter_tuning_job( self, request: job_service.DeleteHyperparameterTuningJobRequest, @@ -698,12 +919,35 @@ async def post_delete_hyperparameter_tuning_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_hyperparameter_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_hyperparameter_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_hyperparameter_tuning_job` interceptor runs + before the `post_delete_hyperparameter_tuning_job_with_metadata` interceptor. 
""" return response + async def post_delete_hyperparameter_tuning_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_hyperparameter_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_hyperparameter_tuning_job_with_metadata` + interceptor in new development instead of the `post_delete_hyperparameter_tuning_job` interceptor. + When both interceptors are used, this `post_delete_hyperparameter_tuning_job_with_metadata` interceptor runs after the + `post_delete_hyperparameter_tuning_job` interceptor. The (possibly modified) response returned by + `post_delete_hyperparameter_tuning_job` will be passed to + `post_delete_hyperparameter_tuning_job_with_metadata`. + """ + return response, metadata + async def pre_delete_model_deployment_monitoring_job( self, request: job_service.DeleteModelDeploymentMonitoringJobRequest, @@ -724,12 +968,35 @@ async def post_delete_model_deployment_monitoring_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_model_deployment_monitoring_job` interceptor runs + before the `post_delete_model_deployment_monitoring_job_with_metadata` interceptor. 
""" return response + async def post_delete_model_deployment_monitoring_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_delete_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_delete_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_delete_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_delete_model_deployment_monitoring_job` will be passed to + `post_delete_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + async def pre_delete_nas_job( self, request: job_service.DeleteNasJobRequest, @@ -749,12 +1016,35 @@ async def post_delete_nas_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_nas_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_nas_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_delete_nas_job` interceptor runs + before the `post_delete_nas_job_with_metadata` interceptor. 
""" return response + async def post_delete_nas_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_nas_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_delete_nas_job_with_metadata` + interceptor in new development instead of the `post_delete_nas_job` interceptor. + When both interceptors are used, this `post_delete_nas_job_with_metadata` interceptor runs after the + `post_delete_nas_job` interceptor. The (possibly modified) response returned by + `post_delete_nas_job` will be passed to + `post_delete_nas_job_with_metadata`. + """ + return response, metadata + async def pre_get_batch_prediction_job( self, request: job_service.GetBatchPredictionJobRequest, @@ -775,12 +1065,37 @@ async def post_get_batch_prediction_job( ) -> batch_prediction_job.BatchPredictionJob: """Post-rpc interceptor for get_batch_prediction_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_batch_prediction_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_batch_prediction_job` interceptor runs + before the `post_get_batch_prediction_job_with_metadata` interceptor. 
""" return response + async def post_get_batch_prediction_job_with_metadata( + self, + response: batch_prediction_job.BatchPredictionJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + batch_prediction_job.BatchPredictionJob, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_batch_prediction_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_batch_prediction_job_with_metadata` + interceptor in new development instead of the `post_get_batch_prediction_job` interceptor. + When both interceptors are used, this `post_get_batch_prediction_job_with_metadata` interceptor runs after the + `post_get_batch_prediction_job` interceptor. The (possibly modified) response returned by + `post_get_batch_prediction_job` will be passed to + `post_get_batch_prediction_job_with_metadata`. + """ + return response, metadata + async def pre_get_custom_job( self, request: job_service.GetCustomJobRequest, @@ -800,12 +1115,35 @@ async def post_get_custom_job( ) -> custom_job.CustomJob: """Post-rpc interceptor for get_custom_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_custom_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_custom_job` interceptor runs + before the `post_get_custom_job_with_metadata` interceptor. 
""" return response + async def post_get_custom_job_with_metadata( + self, + response: custom_job.CustomJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[custom_job.CustomJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_custom_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_custom_job_with_metadata` + interceptor in new development instead of the `post_get_custom_job` interceptor. + When both interceptors are used, this `post_get_custom_job_with_metadata` interceptor runs after the + `post_get_custom_job` interceptor. The (possibly modified) response returned by + `post_get_custom_job` will be passed to + `post_get_custom_job_with_metadata`. + """ + return response, metadata + async def pre_get_data_labeling_job( self, request: job_service.GetDataLabelingJobRequest, @@ -825,12 +1163,37 @@ async def post_get_data_labeling_job( ) -> data_labeling_job.DataLabelingJob: """Post-rpc interceptor for get_data_labeling_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_data_labeling_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_data_labeling_job` interceptor runs + before the `post_get_data_labeling_job_with_metadata` interceptor. 
""" return response + async def post_get_data_labeling_job_with_metadata( + self, + response: data_labeling_job.DataLabelingJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + data_labeling_job.DataLabelingJob, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_data_labeling_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_data_labeling_job_with_metadata` + interceptor in new development instead of the `post_get_data_labeling_job` interceptor. + When both interceptors are used, this `post_get_data_labeling_job_with_metadata` interceptor runs after the + `post_get_data_labeling_job` interceptor. The (possibly modified) response returned by + `post_get_data_labeling_job` will be passed to + `post_get_data_labeling_job_with_metadata`. + """ + return response, metadata + async def pre_get_hyperparameter_tuning_job( self, request: job_service.GetHyperparameterTuningJobRequest, @@ -851,12 +1214,38 @@ async def post_get_hyperparameter_tuning_job( ) -> hyperparameter_tuning_job.HyperparameterTuningJob: """Post-rpc interceptor for get_hyperparameter_tuning_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_hyperparameter_tuning_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_hyperparameter_tuning_job` interceptor runs + before the `post_get_hyperparameter_tuning_job_with_metadata` interceptor. 
""" return response + async def post_get_hyperparameter_tuning_job_with_metadata( + self, + response: hyperparameter_tuning_job.HyperparameterTuningJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + hyperparameter_tuning_job.HyperparameterTuningJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_hyperparameter_tuning_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_hyperparameter_tuning_job_with_metadata` + interceptor in new development instead of the `post_get_hyperparameter_tuning_job` interceptor. + When both interceptors are used, this `post_get_hyperparameter_tuning_job_with_metadata` interceptor runs after the + `post_get_hyperparameter_tuning_job` interceptor. The (possibly modified) response returned by + `post_get_hyperparameter_tuning_job` will be passed to + `post_get_hyperparameter_tuning_job_with_metadata`. + """ + return response, metadata + async def pre_get_model_deployment_monitoring_job( self, request: job_service.GetModelDeploymentMonitoringJobRequest, @@ -877,12 +1266,38 @@ async def post_get_model_deployment_monitoring_job( ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: """Post-rpc interceptor for get_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_model_deployment_monitoring_job` interceptor runs + before the `post_get_model_deployment_monitoring_job_with_metadata` interceptor. 
""" return response + async def post_get_model_deployment_monitoring_job_with_metadata( + self, + response: model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_get_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_get_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_get_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_get_model_deployment_monitoring_job` will be passed to + `post_get_model_deployment_monitoring_job_with_metadata`. + """ + return response, metadata + async def pre_get_nas_job( self, request: job_service.GetNasJobRequest, @@ -898,12 +1313,35 @@ async def pre_get_nas_job( async def post_get_nas_job(self, response: nas_job.NasJob) -> nas_job.NasJob: """Post-rpc interceptor for get_nas_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_nas_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_nas_job` interceptor runs + before the `post_get_nas_job_with_metadata` interceptor. 
""" return response + async def post_get_nas_job_with_metadata( + self, + response: nas_job.NasJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[nas_job.NasJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_nas_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_get_nas_job_with_metadata` + interceptor in new development instead of the `post_get_nas_job` interceptor. + When both interceptors are used, this `post_get_nas_job_with_metadata` interceptor runs after the + `post_get_nas_job` interceptor. The (possibly modified) response returned by + `post_get_nas_job` will be passed to + `post_get_nas_job_with_metadata`. + """ + return response, metadata + async def pre_get_nas_trial_detail( self, request: job_service.GetNasTrialDetailRequest, @@ -923,12 +1361,35 @@ async def post_get_nas_trial_detail( ) -> nas_job.NasTrialDetail: """Post-rpc interceptor for get_nas_trial_detail - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_nas_trial_detail_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_get_nas_trial_detail` interceptor runs + before the `post_get_nas_trial_detail_with_metadata` interceptor. """ return response + async def post_get_nas_trial_detail_with_metadata( + self, + response: nas_job.NasTrialDetail, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[nas_job.NasTrialDetail, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_nas_trial_detail + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. 
+ + We recommend only using this `post_get_nas_trial_detail_with_metadata` + interceptor in new development instead of the `post_get_nas_trial_detail` interceptor. + When both interceptors are used, this `post_get_nas_trial_detail_with_metadata` interceptor runs after the + `post_get_nas_trial_detail` interceptor. The (possibly modified) response returned by + `post_get_nas_trial_detail` will be passed to + `post_get_nas_trial_detail_with_metadata`. + """ + return response, metadata + async def pre_list_batch_prediction_jobs( self, request: job_service.ListBatchPredictionJobsRequest, @@ -949,12 +1410,38 @@ async def post_list_batch_prediction_jobs( ) -> job_service.ListBatchPredictionJobsResponse: """Post-rpc interceptor for list_batch_prediction_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_batch_prediction_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_batch_prediction_jobs` interceptor runs + before the `post_list_batch_prediction_jobs_with_metadata` interceptor. """ return response + async def post_list_batch_prediction_jobs_with_metadata( + self, + response: job_service.ListBatchPredictionJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListBatchPredictionJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_batch_prediction_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_batch_prediction_jobs_with_metadata` + interceptor in new development instead of the `post_list_batch_prediction_jobs` interceptor. 
+ When both interceptors are used, this `post_list_batch_prediction_jobs_with_metadata` interceptor runs after the + `post_list_batch_prediction_jobs` interceptor. The (possibly modified) response returned by + `post_list_batch_prediction_jobs` will be passed to + `post_list_batch_prediction_jobs_with_metadata`. + """ + return response, metadata + async def pre_list_custom_jobs( self, request: job_service.ListCustomJobsRequest, @@ -974,12 +1461,37 @@ async def post_list_custom_jobs( ) -> job_service.ListCustomJobsResponse: """Post-rpc interceptor for list_custom_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_custom_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_custom_jobs` interceptor runs + before the `post_list_custom_jobs_with_metadata` interceptor. """ return response + async def post_list_custom_jobs_with_metadata( + self, + response: job_service.ListCustomJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListCustomJobsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_custom_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_custom_jobs_with_metadata` + interceptor in new development instead of the `post_list_custom_jobs` interceptor. + When both interceptors are used, this `post_list_custom_jobs_with_metadata` interceptor runs after the + `post_list_custom_jobs` interceptor. The (possibly modified) response returned by + `post_list_custom_jobs` will be passed to + `post_list_custom_jobs_with_metadata`. 
+ """ + return response, metadata + async def pre_list_data_labeling_jobs( self, request: job_service.ListDataLabelingJobsRequest, @@ -999,12 +1511,38 @@ async def post_list_data_labeling_jobs( ) -> job_service.ListDataLabelingJobsResponse: """Post-rpc interceptor for list_data_labeling_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_data_labeling_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_data_labeling_jobs` interceptor runs + before the `post_list_data_labeling_jobs_with_metadata` interceptor. """ return response + async def post_list_data_labeling_jobs_with_metadata( + self, + response: job_service.ListDataLabelingJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListDataLabelingJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_data_labeling_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_data_labeling_jobs_with_metadata` + interceptor in new development instead of the `post_list_data_labeling_jobs` interceptor. + When both interceptors are used, this `post_list_data_labeling_jobs_with_metadata` interceptor runs after the + `post_list_data_labeling_jobs` interceptor. The (possibly modified) response returned by + `post_list_data_labeling_jobs` will be passed to + `post_list_data_labeling_jobs_with_metadata`. 
+ """ + return response, metadata + async def pre_list_hyperparameter_tuning_jobs( self, request: job_service.ListHyperparameterTuningJobsRequest, @@ -1025,12 +1563,38 @@ async def post_list_hyperparameter_tuning_jobs( ) -> job_service.ListHyperparameterTuningJobsResponse: """Post-rpc interceptor for list_hyperparameter_tuning_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_hyperparameter_tuning_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_hyperparameter_tuning_jobs` interceptor runs + before the `post_list_hyperparameter_tuning_jobs_with_metadata` interceptor. """ return response + async def post_list_hyperparameter_tuning_jobs_with_metadata( + self, + response: job_service.ListHyperparameterTuningJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListHyperparameterTuningJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_hyperparameter_tuning_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_hyperparameter_tuning_jobs_with_metadata` + interceptor in new development instead of the `post_list_hyperparameter_tuning_jobs` interceptor. + When both interceptors are used, this `post_list_hyperparameter_tuning_jobs_with_metadata` interceptor runs after the + `post_list_hyperparameter_tuning_jobs` interceptor. The (possibly modified) response returned by + `post_list_hyperparameter_tuning_jobs` will be passed to + `post_list_hyperparameter_tuning_jobs_with_metadata`. 
+ """ + return response, metadata + async def pre_list_model_deployment_monitoring_jobs( self, request: job_service.ListModelDeploymentMonitoringJobsRequest, @@ -1051,12 +1615,38 @@ async def post_list_model_deployment_monitoring_jobs( ) -> job_service.ListModelDeploymentMonitoringJobsResponse: """Post-rpc interceptor for list_model_deployment_monitoring_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_deployment_monitoring_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_deployment_monitoring_jobs` interceptor runs + before the `post_list_model_deployment_monitoring_jobs_with_metadata` interceptor. """ return response + async def post_list_model_deployment_monitoring_jobs_with_metadata( + self, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListModelDeploymentMonitoringJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_deployment_monitoring_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_model_deployment_monitoring_jobs_with_metadata` + interceptor in new development instead of the `post_list_model_deployment_monitoring_jobs` interceptor. + When both interceptors are used, this `post_list_model_deployment_monitoring_jobs_with_metadata` interceptor runs after the + `post_list_model_deployment_monitoring_jobs` interceptor. The (possibly modified) response returned by + `post_list_model_deployment_monitoring_jobs` will be passed to + `post_list_model_deployment_monitoring_jobs_with_metadata`. 
+ """ + return response, metadata + async def pre_list_nas_jobs( self, request: job_service.ListNasJobsRequest, @@ -1074,11 +1664,36 @@ async def post_list_nas_jobs( ) -> job_service.ListNasJobsResponse: """Post-rpc interceptor for list_nas_jobs - Override in a subclass to manipulate the response - after it is returned by the JobService server but before - it is returned to user code. + DEPRECATED. Please use the `post_list_nas_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the JobService server but before + it is returned to user code. This `post_list_nas_jobs` interceptor runs + before the `post_list_nas_jobs_with_metadata` interceptor. + """ + return response + + async def post_list_nas_jobs_with_metadata( + self, + response: job_service.ListNasJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListNasJobsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_nas_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_nas_jobs_with_metadata` + interceptor in new development instead of the `post_list_nas_jobs` interceptor. + When both interceptors are used, this `post_list_nas_jobs_with_metadata` interceptor runs after the + `post_list_nas_jobs` interceptor. The (possibly modified) response returned by + `post_list_nas_jobs` will be passed to + `post_list_nas_jobs_with_metadata`. """ - return response + return response, metadata async def pre_list_nas_trial_details( self, @@ -1099,12 +1714,37 @@ async def post_list_nas_trial_details( ) -> job_service.ListNasTrialDetailsResponse: """Post-rpc interceptor for list_nas_trial_details - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_list_nas_trial_details_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_list_nas_trial_details` interceptor runs + before the `post_list_nas_trial_details_with_metadata` interceptor. """ return response + async def post_list_nas_trial_details_with_metadata( + self, + response: job_service.ListNasTrialDetailsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.ListNasTrialDetailsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_nas_trial_details + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_list_nas_trial_details_with_metadata` + interceptor in new development instead of the `post_list_nas_trial_details` interceptor. + When both interceptors are used, this `post_list_nas_trial_details_with_metadata` interceptor runs after the + `post_list_nas_trial_details` interceptor. The (possibly modified) response returned by + `post_list_nas_trial_details` will be passed to + `post_list_nas_trial_details_with_metadata`. + """ + return response, metadata + async def pre_pause_model_deployment_monitoring_job( self, request: job_service.PauseModelDeploymentMonitoringJobRequest, @@ -1156,12 +1796,38 @@ async def post_search_model_deployment_monitoring_stats_anomalies( ) -> job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse: """Post-rpc interceptor for search_model_deployment_monitoring_stats_anomalies - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_search_model_deployment_monitoring_stats_anomalies` interceptor runs + before the `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` interceptor. """ return response + async def post_search_model_deployment_monitoring_stats_anomalies_with_metadata( + self, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_model_deployment_monitoring_stats_anomalies + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` + interceptor in new development instead of the `post_search_model_deployment_monitoring_stats_anomalies` interceptor. + When both interceptors are used, this `post_search_model_deployment_monitoring_stats_anomalies_with_metadata` interceptor runs after the + `post_search_model_deployment_monitoring_stats_anomalies` interceptor. The (possibly modified) response returned by + `post_search_model_deployment_monitoring_stats_anomalies` will be passed to + `post_search_model_deployment_monitoring_stats_anomalies_with_metadata`. 
+ """ + return response, metadata + async def pre_update_model_deployment_monitoring_job( self, request: job_service.UpdateModelDeploymentMonitoringJobRequest, @@ -1182,12 +1848,35 @@ async def post_update_model_deployment_monitoring_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_model_deployment_monitoring_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_model_deployment_monitoring_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the JobService server but before - it is returned to user code. + it is returned to user code. This `post_update_model_deployment_monitoring_job` interceptor runs + before the `post_update_model_deployment_monitoring_job_with_metadata` interceptor. """ return response + async def post_update_model_deployment_monitoring_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_model_deployment_monitoring_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the JobService server but before it is returned to user code. + + We recommend only using this `post_update_model_deployment_monitoring_job_with_metadata` + interceptor in new development instead of the `post_update_model_deployment_monitoring_job` interceptor. + When both interceptors are used, this `post_update_model_deployment_monitoring_job_with_metadata` interceptor runs after the + `post_update_model_deployment_monitoring_job` interceptor. The (possibly modified) response returned by + `post_update_model_deployment_monitoring_job` will be passed to + `post_update_model_deployment_monitoring_job_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1496,9 +2185,9 @@ def __init__( self._interceptor = interceptor or AsyncJobServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1818,7 +2507,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1940,7 +2629,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2064,7 +2753,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2166,11 +2855,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseCancelHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_cancel_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_cancel_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCancelHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -2193,7 +2881,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = 
type(request).to_json(request) except: request_payload = None http_request = { @@ -2321,7 +3009,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2503,6 +3191,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_batch_prediction_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_batch_prediction_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2671,6 +3365,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_custom_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_custom_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2832,6 +3530,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_data_labeling_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_data_labeling_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2925,11 +3629,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseCreateHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_hyperparameter_tuning_job( - 
request, metadata + request, metadata = ( + await self._interceptor.pre_create_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCreateHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -2999,6 +3702,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_hyperparameter_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3097,11 +3806,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseCreateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -3177,6 +3885,12 @@ async def __call__( resp = await self._interceptor.post_create_model_deployment_monitoring_job( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3343,6 +4057,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_nas_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await 
self._interceptor.post_create_nas_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3452,7 +4170,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3498,6 +4216,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_batch_prediction_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_batch_prediction_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3605,7 +4329,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3653,6 +4377,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_custom_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_custom_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3760,7 +4488,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3808,6 +4536,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp 
= await self._interceptor.post_delete_data_labeling_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_data_labeling_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3897,11 +4631,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseDeleteHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseDeleteHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -3920,7 +4653,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3966,6 +4699,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_hyperparameter_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4057,11 +4796,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_delete_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = 
_BaseJobServiceRestTransport._BaseDeleteModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -4080,7 +4818,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4128,6 +4866,12 @@ async def __call__( resp = await self._interceptor.post_delete_model_deployment_monitoring_job( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4239,7 +4983,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4285,6 +5029,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_nas_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_nas_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4444,6 +5192,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_batch_prediction_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_batch_prediction_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4608,6 +5362,10 @@ async 
def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_custom_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_custom_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4763,6 +5521,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_data_labeling_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_data_labeling_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4855,11 +5617,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseGetHyperparameterTuningJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_hyperparameter_tuning_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_hyperparameter_tuning_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseGetHyperparameterTuningJob._get_transcoded_request( http_options, request @@ -4924,6 +5685,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_hyperparameter_tuning_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_hyperparameter_tuning_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5019,11 +5786,10 @@ async def __call__( 
_BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseGetModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -5092,6 +5858,12 @@ async def __call__( resp = await self._interceptor.post_get_model_deployment_monitoring_job( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5250,6 +6022,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_nas_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_nas_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5406,6 +6182,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_nas_trial_detail(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_nas_trial_detail_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5560,6 +6340,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_list_batch_prediction_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_batch_prediction_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5716,6 +6502,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_custom_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_custom_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5872,6 +6662,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_data_labeling_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_data_labeling_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5962,11 +6758,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseListHyperparameterTuningJobs._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_hyperparameter_tuning_jobs( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_hyperparameter_tuning_jobs( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseListHyperparameterTuningJobs._get_transcoded_request( http_options, request @@ -6031,6 +6826,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_hyperparameter_tuning_jobs(resp) + 
response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_hyperparameter_tuning_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6125,11 +6926,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_model_deployment_monitoring_jobs( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_model_deployment_monitoring_jobs( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseListModelDeploymentMonitoringJobs._get_transcoded_request( http_options, request @@ -6196,6 +6996,12 @@ async def __call__( resp = await self._interceptor.post_list_model_deployment_monitoring_jobs( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_model_deployment_monitoring_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6356,6 +7162,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_nas_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_nas_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6510,6 +7320,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_nas_trial_details(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await 
self._interceptor.post_list_nas_trial_details_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6597,11 +7411,10 @@ async def __call__( _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_pause_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_pause_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BasePauseModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -6624,7 +7437,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6726,11 +7539,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_resume_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_resume_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseResumeModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -6753,7 +7565,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6861,11 +7673,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( - request, metadata + request, metadata = ( + await 
self._interceptor.pre_search_model_deployment_monitoring_stats_anomalies( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseSearchModelDeploymentMonitoringStatsAnomalies._get_transcoded_request( http_options, request @@ -6941,6 +7752,12 @@ async def __call__( resp = await self._interceptor.post_search_model_deployment_monitoring_stats_anomalies( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_search_model_deployment_monitoring_stats_anomalies_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7035,11 +7852,10 @@ async def __call__( _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_model_deployment_monitoring_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_model_deployment_monitoring_job( + request, metadata + ) ) transcoded_request = _BaseJobServiceRestTransport._BaseUpdateModelDeploymentMonitoringJob._get_transcoded_request( http_options, request @@ -7062,7 +7878,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7111,6 +7927,12 @@ async def __call__( resp = await self._interceptor.post_update_model_deployment_monitoring_job( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_model_deployment_monitoring_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7306,6 +8128,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -7442,6 +8268,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -7450,6 +8280,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7490,6 +8328,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -7652,6 +8494,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -7680,6 +8526,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -7792,6 +8642,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -7800,6 +8654,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7860,6 +8722,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -8022,6 +8888,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -8062,6 +8932,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -8174,6 +9048,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -8182,6 +9060,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8242,6 +9128,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -8400,6 +9290,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -8440,6 +9334,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -8552,6 +9450,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -8572,6 +9478,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -8620,6 +9530,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -8786,6 +9700,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -8818,6 +9736,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -8926,6 +9848,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -8934,6 +9860,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -8994,6 +9928,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -9318,7 +10256,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. 
Args: @@ -9472,7 +10409,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -9626,7 +10562,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -9781,7 +10716,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -9942,7 +10876,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -10094,7 +11027,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -10218,7 +11150,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -10342,7 +11273,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -10496,7 +11426,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -10648,7 +11577,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py index 2f6e889696..c7a8bddf89 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,7 +46,7 @@ ) from google.cloud.aiplatform_v1.types import nas_job from google.cloud.aiplatform_v1.types import nas_job as gca_nas_job -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -2046,6 +2046,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -2132,6 +2136,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -2210,6 +2219,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -2391,6 +2404,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2527,6 +2544,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2535,6 +2556,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2575,6 +2604,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2756,6 +2789,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2784,6 +2821,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2896,6 +2937,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2904,6 +2949,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2964,6 +3017,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3145,6 +3202,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -3185,6 +3246,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -3297,6 +3362,10 @@ def _get_http_options(): "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3305,6 +3374,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3365,6 +3442,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3542,6 +3623,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3582,6 +3667,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3694,6 +3783,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3714,6 +3811,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3762,6 +3863,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3947,6 +4052,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3979,6 +4088,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -4087,6 +4200,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -4095,6 +4212,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", 
}, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4155,6 +4280,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/__init__.py b/google/cloud/aiplatform_v1/services/llm_utility_service/__init__.py index 8d7452f02e..794046854e 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py index 60f695b40d..eda2eca20f 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,13 +45,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import llm_utility_service from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore from .transports.base import LlmUtilityServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import LlmUtilityServiceGrpcAsyncIOTransport from .client import LlmUtilityServiceClient @@ -119,7 +121,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: LlmUtilityServiceAsyncClient: The constructed client. """ - return LlmUtilityServiceClient.from_service_account_info.__func__(LlmUtilityServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + LlmUtilityServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(LlmUtilityServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -135,7 +140,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: LlmUtilityServiceAsyncClient: The constructed client. 
""" - return LlmUtilityServiceClient.from_service_account_file.__func__(LlmUtilityServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + LlmUtilityServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(LlmUtilityServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -280,21 +288,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", + "credentialsType": None, + } + ), ) async def count_tokens( @@ -370,7 +380,10 @@ async def sample_count_tokens(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, instances]) + flattened_params = [endpoint, instances] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -493,7 +506,10 @@ async def sample_compute_tokens(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances]) + flattened_params = [endpoint, instances] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1246,5 +1262,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("LlmUtilityServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py index 852812f557..19c9bd9c4e 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/client.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,13 +61,14 @@ _LOGGER = std_logging.getLogger(__name__) +from google.cloud.aiplatform_v1.types import content from google.cloud.aiplatform_v1.types import llm_utility_service from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore from .transports.base import LlmUtilityServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import LlmUtilityServiceGrpcTransport from .transports.grpc_asyncio import LlmUtilityServiceGrpcAsyncIOTransport @@ -164,6 +168,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -373,12 +405,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = LlmUtilityServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -386,7 +414,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -418,20 +446,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = LlmUtilityServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -524,6 +546,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -616,11 +665,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = LlmUtilityServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + LlmUtilityServiceClient._read_environment_variables() + ) self._client_cert_source = LlmUtilityServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -733,21 +780,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.LlmUtilityServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", + "credentialsType": None, + } + ), ) def count_tokens( @@ -823,7 +874,10 @@ def sample_count_tokens(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances]) + flattened_params = [endpoint, instances] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -943,7 +997,10 @@ def sample_compute_tokens(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances]) + flattened_params = [endpoint, instances] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1042,16 +1099,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1097,16 +1158,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1269,16 +1334,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1390,16 +1459,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1512,16 +1585,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1572,16 +1649,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1627,16 +1708,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1682,21 +1767,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("LlmUtilityServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/__init__.py index a012450ca5..b1dc8c19d3 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py index 323921b842..c89f5d4401 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import llm_utility_service from google.cloud.aiplatform_v1.types import prediction_service @@ -37,6 +38,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class LlmUtilityServiceTransport(abc.ABC): """Abstract transport class for LlmUtilityService.""" @@ -68,9 +72,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -83,8 +88,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -99,11 +102,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -253,13 +261,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py index 9ef8b91291..286d6e8b4c 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -75,12 +75,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.LlmUtilityService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -157,9 +156,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -292,9 +292,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py index 7fc6cf561d..299fa66d8e 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -153,8 +153,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -205,9 +206,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. 
+ This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py index 2f2e854c77..7acbf2e1af 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -63,6 +64,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class LlmUtilityServiceRestInterceptor: """Interceptor for LlmUtilityService. @@ -121,12 +125,38 @@ def post_compute_tokens( ) -> llm_utility_service.ComputeTokensResponse: """Post-rpc interceptor for compute_tokens - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_compute_tokens_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the LlmUtilityService server but before - it is returned to user code. + it is returned to user code. This `post_compute_tokens` interceptor runs + before the `post_compute_tokens_with_metadata` interceptor. 
""" return response + def post_compute_tokens_with_metadata( + self, + response: llm_utility_service.ComputeTokensResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + llm_utility_service.ComputeTokensResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for compute_tokens + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the LlmUtilityService server but before it is returned to user code. + + We recommend only using this `post_compute_tokens_with_metadata` + interceptor in new development instead of the `post_compute_tokens` interceptor. + When both interceptors are used, this `post_compute_tokens_with_metadata` interceptor runs after the + `post_compute_tokens` interceptor. The (possibly modified) response returned by + `post_compute_tokens` will be passed to + `post_compute_tokens_with_metadata`. + """ + return response, metadata + def pre_count_tokens( self, request: prediction_service.CountTokensRequest, @@ -146,12 +176,37 @@ def post_count_tokens( ) -> prediction_service.CountTokensResponse: """Post-rpc interceptor for count_tokens - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_count_tokens_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the LlmUtilityService server but before - it is returned to user code. + it is returned to user code. This `post_count_tokens` interceptor runs + before the `post_count_tokens_with_metadata` interceptor. 
""" return response + def post_count_tokens_with_metadata( + self, + response: prediction_service.CountTokensResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + prediction_service.CountTokensResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for count_tokens + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the LlmUtilityService server but before it is returned to user code. + + We recommend only using this `post_count_tokens_with_metadata` + interceptor in new development instead of the `post_count_tokens` interceptor. + When both interceptors are used, this `post_count_tokens_with_metadata` interceptor runs after the + `post_count_tokens` interceptor. The (possibly modified) response returned by + `post_count_tokens` will be passed to + `post_count_tokens_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -441,9 +496,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -610,6 +666,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_compute_tokens(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_compute_tokens_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -759,6 +819,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_count_tokens(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_count_tokens_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -846,7 +910,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -989,7 +1052,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1131,7 +1193,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1274,7 +1335,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1422,7 +1482,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. 
Args: @@ -1567,7 +1626,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1684,7 +1742,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1800,7 +1857,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1943,7 +1999,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2086,7 +2141,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_asyncio.py index 4d1c7daaeb..6525fe12af 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -36,7 +36,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -80,6 +80,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncLlmUtilityServiceRestInterceptor: """Asynchronous Interceptor for LlmUtilityService. @@ -138,12 +141,38 @@ async def post_compute_tokens( ) -> llm_utility_service.ComputeTokensResponse: """Post-rpc interceptor for compute_tokens - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_compute_tokens_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the LlmUtilityService server but before - it is returned to user code. + it is returned to user code. This `post_compute_tokens` interceptor runs + before the `post_compute_tokens_with_metadata` interceptor. """ return response + async def post_compute_tokens_with_metadata( + self, + response: llm_utility_service.ComputeTokensResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + llm_utility_service.ComputeTokensResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for compute_tokens + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the LlmUtilityService server but before it is returned to user code. + + We recommend only using this `post_compute_tokens_with_metadata` + interceptor in new development instead of the `post_compute_tokens` interceptor. 
+ When both interceptors are used, this `post_compute_tokens_with_metadata` interceptor runs after the + `post_compute_tokens` interceptor. The (possibly modified) response returned by + `post_compute_tokens` will be passed to + `post_compute_tokens_with_metadata`. + """ + return response, metadata + async def pre_count_tokens( self, request: prediction_service.CountTokensRequest, @@ -163,12 +192,37 @@ async def post_count_tokens( ) -> prediction_service.CountTokensResponse: """Post-rpc interceptor for count_tokens - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_count_tokens_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the LlmUtilityService server but before - it is returned to user code. + it is returned to user code. This `post_count_tokens` interceptor runs + before the `post_count_tokens_with_metadata` interceptor. """ return response + async def post_count_tokens_with_metadata( + self, + response: prediction_service.CountTokensResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + prediction_service.CountTokensResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for count_tokens + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the LlmUtilityService server but before it is returned to user code. + + We recommend only using this `post_count_tokens_with_metadata` + interceptor in new development instead of the `post_count_tokens` interceptor. + When both interceptors are used, this `post_count_tokens_with_metadata` interceptor runs after the + `post_count_tokens` interceptor. The (possibly modified) response returned by + `post_count_tokens` will be passed to + `post_count_tokens_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -685,6 +739,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_compute_tokens(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_compute_tokens_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -845,6 +903,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_count_tokens(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_count_tokens_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -930,7 +992,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -1083,7 +1144,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1236,7 +1296,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1390,7 +1449,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -1548,7 +1606,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1699,7 +1756,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1822,7 +1878,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1945,7 +2000,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -2098,7 +2152,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2251,7 +2304,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py index b17ab3ea00..465640bcae 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -359,6 +359,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -445,6 +449,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -523,6 +532,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -704,6 +717,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -840,6 +857,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -848,6 +869,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -888,6 +917,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1069,6 +1102,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1097,6 +1134,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1209,6 +1250,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1217,6 +1262,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1277,6 +1330,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1458,6 +1515,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1498,6 +1559,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1610,6 +1675,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1618,6 +1687,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1678,6 +1755,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1855,6 +1936,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1895,6 +1980,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2007,6 +2096,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2027,6 +2124,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2075,6 +2176,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2260,6 +2365,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2292,6 +2401,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2400,6 +2513,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2408,6 +2525,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2468,6 +2593,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/match_service/__init__.py b/google/cloud/aiplatform_v1/services/match_service/__init__.py index ecce1154d1..418b6338d1 100644 --- a/google/cloud/aiplatform_v1/services/match_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/match_service/__init__.py @@ -1,5 
+1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/match_service/async_client.py b/google/cloud/aiplatform_v1/services/match_service/async_client.py index 32302ae228..e001ef9a8a 100644 --- a/google/cloud/aiplatform_v1/services/match_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/match_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -116,7 +117,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: MatchServiceAsyncClient: The constructed client. """ - return MatchServiceClient.from_service_account_info.__func__(MatchServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + MatchServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(MatchServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -132,7 +136,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: MatchServiceAsyncClient: The constructed client. 
""" - return MatchServiceClient.from_service_account_file.__func__(MatchServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + MatchServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(MatchServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -273,21 +280,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.MatchServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.MatchService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.MatchService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.MatchService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.MatchService", + "credentialsType": None, + } + ), ) async def find_neighbors( @@ -1177,5 +1186,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("MatchServiceAsyncClient",) diff --git 
a/google/cloud/aiplatform_v1/services/match_service/client.py b/google/cloud/aiplatform_v1/services/match_service/client.py index 49d424c2b6..7daece93c9 100644 --- a/google/cloud/aiplatform_v1/services/match_service/client.py +++ b/google/cloud/aiplatform_v1/services/match_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. # from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -163,6 +166,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -350,12 +381,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = MatchServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -363,7 +390,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -395,20 +422,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = MatchServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -501,6 +522,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -589,11 +637,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = MatchServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + MatchServiceClient._read_environment_variables() + ) self._client_cert_source = MatchServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -702,21 +748,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.MatchServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.MatchService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.MatchService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.MatchService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.MatchService", + "credentialsType": None, + } + ), ) def find_neighbors( @@ -951,16 +1001,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1006,16 +1060,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1178,16 +1236,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1299,16 +1361,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1421,16 +1487,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1481,16 +1551,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1536,16 +1610,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1591,21 +1669,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("MatchServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/match_service/transports/__init__.py index 7f22d7e642..ab63d1e630 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/base.py b/google/cloud/aiplatform_v1/services/match_service/transports/base.py index dfba7b8cb3..189946f8be 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import match_service from google.cloud.location import locations_pb2 # type: ignore @@ -36,6 +37,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class MatchServiceTransport(abc.ABC): """Abstract transport class for MatchService.""" @@ -67,9 +71,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -82,8 +87,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -98,11 +101,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -252,13 +260,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py index 5629478cf5..d60a2a65a7 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -74,12 +74,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.MatchService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -157,9 +156,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -292,9 +292,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py index f7f8b526f9..4267207f08 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -153,8 +153,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -205,9 +206,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/rest.py b/google/cloud/aiplatform_v1/services/match_service/transports/rest.py index 7ee3ea6e99..c91ef1b767 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -62,6 +63,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class MatchServiceRestInterceptor: """Interceptor for MatchService. @@ -119,12 +123,37 @@ def post_find_neighbors( ) -> match_service.FindNeighborsResponse: """Post-rpc interceptor for find_neighbors - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_find_neighbors_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MatchService server but before - it is returned to user code. + it is returned to user code. This `post_find_neighbors` interceptor runs + before the `post_find_neighbors_with_metadata` interceptor. 
""" return response + def post_find_neighbors_with_metadata( + self, + response: match_service.FindNeighborsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + match_service.FindNeighborsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for find_neighbors + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MatchService server but before it is returned to user code. + + We recommend only using this `post_find_neighbors_with_metadata` + interceptor in new development instead of the `post_find_neighbors` interceptor. + When both interceptors are used, this `post_find_neighbors_with_metadata` interceptor runs after the + `post_find_neighbors` interceptor. The (possibly modified) response returned by + `post_find_neighbors` will be passed to + `post_find_neighbors_with_metadata`. + """ + return response, metadata + def pre_read_index_datapoints( self, request: match_service.ReadIndexDatapointsRequest, @@ -145,12 +174,38 @@ def post_read_index_datapoints( ) -> match_service.ReadIndexDatapointsResponse: """Post-rpc interceptor for read_index_datapoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_index_datapoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MatchService server but before - it is returned to user code. + it is returned to user code. This `post_read_index_datapoints` interceptor runs + before the `post_read_index_datapoints_with_metadata` interceptor. 
""" return response + def post_read_index_datapoints_with_metadata( + self, + response: match_service.ReadIndexDatapointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + match_service.ReadIndexDatapointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for read_index_datapoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MatchService server but before it is returned to user code. + + We recommend only using this `post_read_index_datapoints_with_metadata` + interceptor in new development instead of the `post_read_index_datapoints` interceptor. + When both interceptors are used, this `post_read_index_datapoints_with_metadata` interceptor runs after the + `post_read_index_datapoints` interceptor. The (possibly modified) response returned by + `post_read_index_datapoints` will be passed to + `post_read_index_datapoints_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -441,9 +496,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -609,6 +665,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_find_neighbors(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_find_neighbors_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -763,6 +823,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_read_index_datapoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_index_datapoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -850,7 +914,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -996,7 +1059,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1138,7 +1200,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1283,7 +1344,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1434,7 +1494,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. 
Args: @@ -1578,7 +1637,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1694,7 +1752,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1810,7 +1867,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1954,7 +2010,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2096,7 +2151,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/match_service/transports/rest_asyncio.py index 5a1dbd3c4c..f1612a6a0a 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -36,7 +36,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -79,6 +79,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncMatchServiceRestInterceptor: """Asynchronous Interceptor for MatchService. @@ -136,12 +139,37 @@ async def post_find_neighbors( ) -> match_service.FindNeighborsResponse: """Post-rpc interceptor for find_neighbors - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_find_neighbors_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MatchService server but before - it is returned to user code. + it is returned to user code. This `post_find_neighbors` interceptor runs + before the `post_find_neighbors_with_metadata` interceptor. """ return response + async def post_find_neighbors_with_metadata( + self, + response: match_service.FindNeighborsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + match_service.FindNeighborsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for find_neighbors + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MatchService server but before it is returned to user code. + + We recommend only using this `post_find_neighbors_with_metadata` + interceptor in new development instead of the `post_find_neighbors` interceptor. + When both interceptors are used, this `post_find_neighbors_with_metadata` interceptor runs after the + `post_find_neighbors` interceptor. 
The (possibly modified) response returned by + `post_find_neighbors` will be passed to + `post_find_neighbors_with_metadata`. + """ + return response, metadata + async def pre_read_index_datapoints( self, request: match_service.ReadIndexDatapointsRequest, @@ -162,12 +190,38 @@ async def post_read_index_datapoints( ) -> match_service.ReadIndexDatapointsResponse: """Post-rpc interceptor for read_index_datapoints - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_index_datapoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MatchService server but before - it is returned to user code. + it is returned to user code. This `post_read_index_datapoints` interceptor runs + before the `post_read_index_datapoints_with_metadata` interceptor. """ return response + async def post_read_index_datapoints_with_metadata( + self, + response: match_service.ReadIndexDatapointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + match_service.ReadIndexDatapointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for read_index_datapoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MatchService server but before it is returned to user code. + + We recommend only using this `post_read_index_datapoints_with_metadata` + interceptor in new development instead of the `post_read_index_datapoints` interceptor. + When both interceptors are used, this `post_read_index_datapoints_with_metadata` interceptor runs after the + `post_read_index_datapoints` interceptor. The (possibly modified) response returned by + `post_read_index_datapoints` will be passed to + `post_read_index_datapoints_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -684,6 +738,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_find_neighbors(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_find_neighbors_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -847,6 +905,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_read_index_datapoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_read_index_datapoints_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -931,7 +993,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -1085,7 +1146,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1237,7 +1297,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1390,7 +1449,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -1550,7 +1608,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1702,7 +1759,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1826,7 +1882,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -1950,7 +2005,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -2102,7 +2156,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2254,7 +2307,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py index 416308b2a3..01b69f1385 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -328,6 +328,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -414,6 +418,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -492,6 +501,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -673,6 +686,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -809,6 +826,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -817,6 +838,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -857,6 +886,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1038,6 +1071,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1066,6 +1103,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1178,6 +1219,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1186,6 +1231,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1246,6 +1299,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1427,6 +1484,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1467,6 +1528,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1579,6 +1644,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1587,6 +1656,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1647,6 +1724,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1824,6 +1905,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1864,6 +1949,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1976,6 +2065,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -1996,6 +2093,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2044,6 +2145,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2229,6 +2334,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2261,6 +2370,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2369,6 +2482,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2377,6 +2494,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2437,6 +2562,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/metadata_service/__init__.py b/google/cloud/aiplatform_v1/services/metadata_service/__init__.py index 6cdde109f5..74e9509053 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/metadata_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py index 2d90d265e4..2a26209097 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.metadata_service import pagers from google.cloud.aiplatform_v1.types import artifact from google.cloud.aiplatform_v1.types import artifact as gca_artifact @@ -66,10 +65,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import 
google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport from .client import MetadataServiceClient @@ -148,7 +149,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: MetadataServiceAsyncClient: The constructed client. """ - return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + MetadataServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(MetadataServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -164,7 +168,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: MetadataServiceAsyncClient: The constructed client. 
""" - return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + MetadataServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(MetadataServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -307,21 +314,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.MetadataServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.MetadataService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.MetadataService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.MetadataService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.MetadataService", + "credentialsType": None, + } + ), ) async def create_metadata_store( @@ -423,7 +432,10 @@ async def sample_create_metadata_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, metadata_store, metadata_store_id]) + flattened_params = [parent, metadata_store, metadata_store_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -545,7 +557,10 @@ async def sample_get_metadata_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -660,7 +675,10 @@ async def sample_list_metadata_stores(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -797,7 +815,10 @@ async def sample_delete_metadata_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -934,7 +955,10 @@ async def sample_create_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, artifact, artifact_id]) + flattened_params = [parent, artifact, artifact_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1045,7 +1069,10 @@ async def sample_get_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1158,7 +1185,10 @@ async def sample_list_artifacts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1286,7 +1316,10 @@ async def sample_update_artifact(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) + flattened_params = [artifact, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1413,7 +1446,10 @@ async def sample_delete_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1537,7 +1573,10 @@ async def sample_purge_artifacts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1674,7 +1713,10 @@ async def sample_create_context(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, context, context_id]) + flattened_params = [parent, context, context_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1785,7 +1827,10 @@ async def sample_get_context(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1898,7 +1943,10 @@ async def sample_list_contexts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2025,7 +2073,10 @@ async def sample_update_context(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) + flattened_params = [context, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2152,7 +2203,10 @@ async def sample_delete_context(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2276,7 +2330,10 @@ async def sample_purge_contexts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2419,7 +2476,10 @@ async def sample_add_context_artifacts_and_executions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, artifacts, executions]) + flattened_params = [context, artifacts, executions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2549,7 +2609,10 @@ async def sample_add_context_children(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([context, child_contexts]) + flattened_params = [context, child_contexts] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2673,7 +2736,10 @@ async def sample_remove_context_children(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) + flattened_params = [context, child_contexts] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2795,7 +2861,10 @@ async def sample_query_context_lineage_subgraph(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context]) + flattened_params = [context] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2924,7 +2993,10 @@ async def sample_create_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, execution, execution_id]) + flattened_params = [parent, execution, execution_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3035,7 +3107,10 @@ async def sample_get_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3148,7 +3223,10 @@ async def sample_list_executions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3276,7 +3354,10 @@ async def sample_update_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([execution, update_mask]) + flattened_params = [execution, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3403,7 +3484,10 @@ async def sample_delete_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3527,7 +3611,10 @@ async def sample_purge_executions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3656,7 +3743,10 @@ async def sample_add_execution_events(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([execution, events]) + flattened_params = [execution, events] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3775,7 +3865,10 @@ async def sample_query_execution_inputs_and_outputs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) + flattened_params = [execution] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3916,7 +4009,10 @@ async def sample_create_metadata_schema(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + flattened_params = [parent, metadata_schema, metadata_schema_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4029,7 +4125,10 @@ async def sample_get_metadata_schema(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4144,7 +4243,10 @@ async def sample_list_metadata_schemas(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4275,7 +4377,10 @@ async def sample_query_artifact_lineage_subgraph(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([artifact]) + flattened_params = [artifact] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -5028,5 +5133,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("MetadataServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/client.py b/google/cloud/aiplatform_v1/services/metadata_service/client.py index 44a0ec4e44..182efc1aa2 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/client.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.metadata_service import pagers from google.cloud.aiplatform_v1.types import artifact from google.cloud.aiplatform_v1.types import artifact as gca_artifact @@ -80,10 +81,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import MetadataServiceGrpcTransport from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport @@ -183,6 +186,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" 
_DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -466,12 +497,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = MetadataServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ 
-479,7 +506,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -511,20 +538,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = MetadataServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -617,6 +638,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -707,11 +755,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = MetadataServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + MetadataServiceClient._read_environment_variables() + ) self._client_cert_source = MetadataServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -823,21 +869,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.MetadataServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.MetadataService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.MetadataService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.MetadataService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + 
"credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.MetadataService", + "credentialsType": None, + } + ), ) def create_metadata_store( @@ -939,7 +989,10 @@ def sample_create_metadata_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) + flattened_params = [parent, metadata_store, metadata_store_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1058,7 +1111,10 @@ def sample_get_metadata_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1170,7 +1226,10 @@ def sample_list_metadata_stores(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1304,7 +1363,10 @@ def sample_delete_metadata_store(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1438,7 +1500,10 @@ def sample_create_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, artifact, artifact_id]) + flattened_params = [parent, artifact, artifact_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1546,7 +1611,10 @@ def sample_get_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1656,7 +1724,10 @@ def sample_list_artifacts(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1781,7 +1852,10 @@ def sample_update_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) + flattened_params = [artifact, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1905,7 +1979,10 @@ def sample_delete_artifact(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2026,7 +2103,10 @@ def sample_purge_artifacts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2160,7 +2240,10 @@ def sample_create_context(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, context, context_id]) + flattened_params = [parent, context, context_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2268,7 +2351,10 @@ def sample_get_context(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2378,7 +2464,10 @@ def sample_list_contexts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2502,7 +2591,10 @@ def sample_update_context(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) + flattened_params = [context, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2626,7 +2718,10 @@ def sample_delete_context(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2747,7 +2842,10 @@ def sample_purge_contexts(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2887,7 +2985,10 @@ def sample_add_context_artifacts_and_executions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([context, artifacts, executions]) + flattened_params = [context, artifacts, executions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3016,7 +3117,10 @@ def sample_add_context_children(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) + flattened_params = [context, child_contexts] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3137,7 +3241,10 @@ def sample_remove_context_children(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) + flattened_params = [context, child_contexts] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3256,7 +3363,10 @@ def sample_query_context_lineage_subgraph(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([context]) + flattened_params = [context] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3384,7 +3494,10 @@ def sample_create_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, execution, execution_id]) + flattened_params = [parent, execution, execution_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3492,7 +3605,10 @@ def sample_get_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3602,7 +3718,10 @@ def sample_list_executions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3727,7 +3846,10 @@ def sample_update_execution(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, update_mask]) + flattened_params = [execution, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3851,7 +3973,10 @@ def sample_delete_execution(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3972,7 +4097,10 @@ def sample_purge_executions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4098,7 +4226,10 @@ def sample_add_execution_events(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([execution, events]) + flattened_params = [execution, events] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4214,7 +4345,10 @@ def sample_query_execution_inputs_and_outputs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) + flattened_params = [execution] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4354,7 +4488,10 @@ def sample_create_metadata_schema(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + flattened_params = [parent, metadata_schema, metadata_schema_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4464,7 +4601,10 @@ def sample_get_metadata_schema(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4576,7 +4716,10 @@ def sample_list_metadata_schemas(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4704,7 +4847,10 @@ def sample_query_artifact_lineage_subgraph(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact]) + flattened_params = [artifact] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -4805,16 +4951,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -4860,16 +5010,20 @@ def get_operation( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -5032,16 +5186,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -5153,16 +5311,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -5275,16 +5437,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -5335,16 +5501,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -5390,16 +5560,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -5445,21 +5619,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("MetadataServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/metadata_service/pagers.py b/google/cloud/aiplatform_v1/services/metadata_service/pagers.py index 31e3d76f68..ea4f70ac89 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py index d2289832e7..e840719c6a 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py index 65dc600247..d481046adb 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import artifact from google.cloud.aiplatform_v1.types import artifact as gca_artifact @@ -47,6 +48,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class MetadataServiceTransport(abc.ABC): """Abstract transport class for MetadataService.""" @@ -78,9 +82,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
@@ -93,8 +98,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -109,11 +112,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -723,13 +731,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py index 84ee8a5805..1dd48559c9 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -85,12 +85,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.MetadataService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -167,9 +166,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -303,9 +303,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -809,12 +810,12 @@ def add_context_artifacts_and_executions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "add_context_artifacts_and_executions" not in self._stubs: - self._stubs[ - "add_context_artifacts_and_executions" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions", - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + self._stubs["add_context_artifacts_and_executions"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions", + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) ) return self._stubs["add_context_artifacts_and_executions"] @@ -906,12 +907,12 @@ def query_context_lineage_subgraph( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "query_context_lineage_subgraph" not in self._stubs: - self._stubs[ - "query_context_lineage_subgraph" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph", - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + self._stubs["query_context_lineage_subgraph"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph", + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) ) return self._stubs["query_context_lineage_subgraph"] @@ -1132,12 +1133,12 @@ def query_execution_inputs_and_outputs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "query_execution_inputs_and_outputs" not in self._stubs: - self._stubs[ - "query_execution_inputs_and_outputs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs", - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + self._stubs["query_execution_inputs_and_outputs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs", + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) ) return self._stubs["query_execution_inputs_and_outputs"] @@ -1252,12 +1253,12 @@ def query_artifact_lineage_subgraph( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "query_artifact_lineage_subgraph" not in self._stubs: - self._stubs[ - "query_artifact_lineage_subgraph" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph", - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + self._stubs["query_artifact_lineage_subgraph"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph", + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) ) return self._stubs["query_artifact_lineage_subgraph"] diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py index d8142dcac5..4a6d803fd2 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -163,8 +163,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -215,9 +216,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -837,12 +839,12 @@ def add_context_artifacts_and_executions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "add_context_artifacts_and_executions" not in self._stubs: - self._stubs[ - "add_context_artifacts_and_executions" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions", - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + self._stubs["add_context_artifacts_and_executions"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions", + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) ) return self._stubs["add_context_artifacts_and_executions"] @@ -934,12 +936,12 @@ def query_context_lineage_subgraph( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "query_context_lineage_subgraph" not in self._stubs: - self._stubs[ - "query_context_lineage_subgraph" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph", - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + self._stubs["query_context_lineage_subgraph"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph", + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) ) return self._stubs["query_context_lineage_subgraph"] @@ -1170,12 +1172,12 @@ def query_execution_inputs_and_outputs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "query_execution_inputs_and_outputs" not in self._stubs: - self._stubs[ - "query_execution_inputs_and_outputs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs", - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + self._stubs["query_execution_inputs_and_outputs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs", + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) ) return self._stubs["query_execution_inputs_and_outputs"] @@ -1291,12 +1293,12 @@ def query_artifact_lineage_subgraph( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "query_artifact_lineage_subgraph" not in self._stubs: - self._stubs[ - "query_artifact_lineage_subgraph" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph", - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + self._stubs["query_artifact_lineage_subgraph"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph", + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) ) return self._stubs["query_artifact_lineage_subgraph"] diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py index 62dbd4a8d8..8be13d5517 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py +++ 
b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -73,6 +74,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class MetadataServiceRestInterceptor: """Interceptor for MetadataService. @@ -371,12 +375,38 @@ def post_add_context_artifacts_and_executions( ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: """Post-rpc interceptor for add_context_artifacts_and_executions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_add_context_artifacts_and_executions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_add_context_artifacts_and_executions` interceptor runs + before the `post_add_context_artifacts_and_executions_with_metadata` interceptor. 
""" return response + def post_add_context_artifacts_and_executions_with_metadata( + self, + response: metadata_service.AddContextArtifactsAndExecutionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_context_artifacts_and_executions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_add_context_artifacts_and_executions_with_metadata` + interceptor in new development instead of the `post_add_context_artifacts_and_executions` interceptor. + When both interceptors are used, this `post_add_context_artifacts_and_executions_with_metadata` interceptor runs after the + `post_add_context_artifacts_and_executions` interceptor. The (possibly modified) response returned by + `post_add_context_artifacts_and_executions` will be passed to + `post_add_context_artifacts_and_executions_with_metadata`. + """ + return response, metadata + def pre_add_context_children( self, request: metadata_service.AddContextChildrenRequest, @@ -397,12 +427,38 @@ def post_add_context_children( ) -> metadata_service.AddContextChildrenResponse: """Post-rpc interceptor for add_context_children - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_add_context_children_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_add_context_children` interceptor runs + before the `post_add_context_children_with_metadata` interceptor. 
""" return response + def post_add_context_children_with_metadata( + self, + response: metadata_service.AddContextChildrenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.AddContextChildrenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_context_children + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_add_context_children_with_metadata` + interceptor in new development instead of the `post_add_context_children` interceptor. + When both interceptors are used, this `post_add_context_children_with_metadata` interceptor runs after the + `post_add_context_children` interceptor. The (possibly modified) response returned by + `post_add_context_children` will be passed to + `post_add_context_children_with_metadata`. + """ + return response, metadata + def pre_add_execution_events( self, request: metadata_service.AddExecutionEventsRequest, @@ -423,12 +479,38 @@ def post_add_execution_events( ) -> metadata_service.AddExecutionEventsResponse: """Post-rpc interceptor for add_execution_events - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_add_execution_events_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_add_execution_events` interceptor runs + before the `post_add_execution_events_with_metadata` interceptor. 
""" return response + def post_add_execution_events_with_metadata( + self, + response: metadata_service.AddExecutionEventsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.AddExecutionEventsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_execution_events + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_add_execution_events_with_metadata` + interceptor in new development instead of the `post_add_execution_events` interceptor. + When both interceptors are used, this `post_add_execution_events_with_metadata` interceptor runs after the + `post_add_execution_events` interceptor. The (possibly modified) response returned by + `post_add_execution_events` will be passed to + `post_add_execution_events_with_metadata`. + """ + return response, metadata + def pre_create_artifact( self, request: metadata_service.CreateArtifactRequest, @@ -448,12 +530,35 @@ def post_create_artifact( ) -> gca_artifact.Artifact: """Post-rpc interceptor for create_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_artifact` interceptor runs + before the `post_create_artifact_with_metadata` interceptor. 
""" return response + def post_create_artifact_with_metadata( + self, + response: gca_artifact.Artifact, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_artifact.Artifact, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_artifact_with_metadata` + interceptor in new development instead of the `post_create_artifact` interceptor. + When both interceptors are used, this `post_create_artifact_with_metadata` interceptor runs after the + `post_create_artifact` interceptor. The (possibly modified) response returned by + `post_create_artifact` will be passed to + `post_create_artifact_with_metadata`. + """ + return response, metadata + def pre_create_context( self, request: metadata_service.CreateContextRequest, @@ -471,12 +576,35 @@ def pre_create_context( def post_create_context(self, response: gca_context.Context) -> gca_context.Context: """Post-rpc interceptor for create_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_context_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_context` interceptor runs + before the `post_create_context_with_metadata` interceptor. 
""" return response + def post_create_context_with_metadata( + self, + response: gca_context.Context, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_context.Context, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_context_with_metadata` + interceptor in new development instead of the `post_create_context` interceptor. + When both interceptors are used, this `post_create_context_with_metadata` interceptor runs after the + `post_create_context` interceptor. The (possibly modified) response returned by + `post_create_context` will be passed to + `post_create_context_with_metadata`. + """ + return response, metadata + def pre_create_execution( self, request: metadata_service.CreateExecutionRequest, @@ -496,12 +624,35 @@ def post_create_execution( ) -> gca_execution.Execution: """Post-rpc interceptor for create_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_execution` interceptor runs + before the `post_create_execution_with_metadata` interceptor. """ return response + def post_create_execution_with_metadata( + self, + response: gca_execution.Execution, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_execution.Execution, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. 
+ + We recommend only using this `post_create_execution_with_metadata` + interceptor in new development instead of the `post_create_execution` interceptor. + When both interceptors are used, this `post_create_execution_with_metadata` interceptor runs after the + `post_create_execution` interceptor. The (possibly modified) response returned by + `post_create_execution` will be passed to + `post_create_execution_with_metadata`. + """ + return response, metadata + def pre_create_metadata_schema( self, request: metadata_service.CreateMetadataSchemaRequest, @@ -522,12 +673,37 @@ def post_create_metadata_schema( ) -> gca_metadata_schema.MetadataSchema: """Post-rpc interceptor for create_metadata_schema - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_metadata_schema_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_metadata_schema` interceptor runs + before the `post_create_metadata_schema_with_metadata` interceptor. """ return response + def post_create_metadata_schema_with_metadata( + self, + response: gca_metadata_schema.MetadataSchema, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_metadata_schema.MetadataSchema, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_metadata_schema + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_metadata_schema_with_metadata` + interceptor in new development instead of the `post_create_metadata_schema` interceptor. + When both interceptors are used, this `post_create_metadata_schema_with_metadata` interceptor runs after the + `post_create_metadata_schema` interceptor. 
The (possibly modified) response returned by + `post_create_metadata_schema` will be passed to + `post_create_metadata_schema_with_metadata`. + """ + return response, metadata + def pre_create_metadata_store( self, request: metadata_service.CreateMetadataStoreRequest, @@ -548,12 +724,35 @@ def post_create_metadata_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_metadata_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_metadata_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_metadata_store` interceptor runs + before the `post_create_metadata_store_with_metadata` interceptor. """ return response + def post_create_metadata_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_metadata_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_metadata_store_with_metadata` + interceptor in new development instead of the `post_create_metadata_store` interceptor. + When both interceptors are used, this `post_create_metadata_store_with_metadata` interceptor runs after the + `post_create_metadata_store` interceptor. The (possibly modified) response returned by + `post_create_metadata_store` will be passed to + `post_create_metadata_store_with_metadata`. 
+ """ + return response, metadata + def pre_delete_artifact( self, request: metadata_service.DeleteArtifactRequest, @@ -573,12 +772,35 @@ def post_delete_artifact( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_artifact` interceptor runs + before the `post_delete_artifact_with_metadata` interceptor. """ return response + def post_delete_artifact_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_artifact_with_metadata` + interceptor in new development instead of the `post_delete_artifact` interceptor. + When both interceptors are used, this `post_delete_artifact_with_metadata` interceptor runs after the + `post_delete_artifact` interceptor. The (possibly modified) response returned by + `post_delete_artifact` will be passed to + `post_delete_artifact_with_metadata`. + """ + return response, metadata + def pre_delete_context( self, request: metadata_service.DeleteContextRequest, @@ -598,12 +820,35 @@ def post_delete_context( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_context_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_context` interceptor runs + before the `post_delete_context_with_metadata` interceptor. """ return response + def post_delete_context_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_context_with_metadata` + interceptor in new development instead of the `post_delete_context` interceptor. + When both interceptors are used, this `post_delete_context_with_metadata` interceptor runs after the + `post_delete_context` interceptor. The (possibly modified) response returned by + `post_delete_context` will be passed to + `post_delete_context_with_metadata`. + """ + return response, metadata + def pre_delete_execution( self, request: metadata_service.DeleteExecutionRequest, @@ -623,12 +868,35 @@ def post_delete_execution( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_execution` interceptor runs + before the `post_delete_execution_with_metadata` interceptor. 
""" return response + def post_delete_execution_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_execution_with_metadata` + interceptor in new development instead of the `post_delete_execution` interceptor. + When both interceptors are used, this `post_delete_execution_with_metadata` interceptor runs after the + `post_delete_execution` interceptor. The (possibly modified) response returned by + `post_delete_execution` will be passed to + `post_delete_execution_with_metadata`. + """ + return response, metadata + def pre_delete_metadata_store( self, request: metadata_service.DeleteMetadataStoreRequest, @@ -649,12 +917,35 @@ def post_delete_metadata_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_metadata_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_metadata_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_metadata_store` interceptor runs + before the `post_delete_metadata_store_with_metadata` interceptor. 
""" return response + def post_delete_metadata_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_metadata_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_metadata_store_with_metadata` + interceptor in new development instead of the `post_delete_metadata_store` interceptor. + When both interceptors are used, this `post_delete_metadata_store_with_metadata` interceptor runs after the + `post_delete_metadata_store` interceptor. The (possibly modified) response returned by + `post_delete_metadata_store` will be passed to + `post_delete_metadata_store_with_metadata`. + """ + return response, metadata + def pre_get_artifact( self, request: metadata_service.GetArtifactRequest, @@ -672,12 +963,35 @@ def pre_get_artifact( def post_get_artifact(self, response: artifact.Artifact) -> artifact.Artifact: """Post-rpc interceptor for get_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_artifact` interceptor runs + before the `post_get_artifact_with_metadata` interceptor. 
""" return response + def post_get_artifact_with_metadata( + self, + response: artifact.Artifact, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[artifact.Artifact, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_artifact_with_metadata` + interceptor in new development instead of the `post_get_artifact` interceptor. + When both interceptors are used, this `post_get_artifact_with_metadata` interceptor runs after the + `post_get_artifact` interceptor. The (possibly modified) response returned by + `post_get_artifact` will be passed to + `post_get_artifact_with_metadata`. + """ + return response, metadata + def pre_get_context( self, request: metadata_service.GetContextRequest, @@ -695,12 +1009,35 @@ def pre_get_context( def post_get_context(self, response: context.Context) -> context.Context: """Post-rpc interceptor for get_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_context_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_context` interceptor runs + before the `post_get_context_with_metadata` interceptor. """ return response + def post_get_context_with_metadata( + self, + response: context.Context, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[context.Context, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. 
+ + We recommend only using this `post_get_context_with_metadata` + interceptor in new development instead of the `post_get_context` interceptor. + When both interceptors are used, this `post_get_context_with_metadata` interceptor runs after the + `post_get_context` interceptor. The (possibly modified) response returned by + `post_get_context` will be passed to + `post_get_context_with_metadata`. + """ + return response, metadata + def pre_get_execution( self, request: metadata_service.GetExecutionRequest, @@ -718,12 +1055,35 @@ def pre_get_execution( def post_get_execution(self, response: execution.Execution) -> execution.Execution: """Post-rpc interceptor for get_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_execution` interceptor runs + before the `post_get_execution_with_metadata` interceptor. """ return response + def post_get_execution_with_metadata( + self, + response: execution.Execution, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[execution.Execution, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_execution_with_metadata` + interceptor in new development instead of the `post_get_execution` interceptor. + When both interceptors are used, this `post_get_execution_with_metadata` interceptor runs after the + `post_get_execution` interceptor. The (possibly modified) response returned by + `post_get_execution` will be passed to + `post_get_execution_with_metadata`. 
+ """ + return response, metadata + def pre_get_metadata_schema( self, request: metadata_service.GetMetadataSchemaRequest, @@ -744,12 +1104,35 @@ def post_get_metadata_schema( ) -> metadata_schema.MetadataSchema: """Post-rpc interceptor for get_metadata_schema - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_metadata_schema_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_metadata_schema` interceptor runs + before the `post_get_metadata_schema_with_metadata` interceptor. """ return response + def post_get_metadata_schema_with_metadata( + self, + response: metadata_schema.MetadataSchema, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[metadata_schema.MetadataSchema, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_metadata_schema + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_metadata_schema_with_metadata` + interceptor in new development instead of the `post_get_metadata_schema` interceptor. + When both interceptors are used, this `post_get_metadata_schema_with_metadata` interceptor runs after the + `post_get_metadata_schema` interceptor. The (possibly modified) response returned by + `post_get_metadata_schema` will be passed to + `post_get_metadata_schema_with_metadata`. + """ + return response, metadata + def pre_get_metadata_store( self, request: metadata_service.GetMetadataStoreRequest, @@ -770,12 +1153,35 @@ def post_get_metadata_store( ) -> metadata_store.MetadataStore: """Post-rpc interceptor for get_metadata_store - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_metadata_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_metadata_store` interceptor runs + before the `post_get_metadata_store_with_metadata` interceptor. """ return response + def post_get_metadata_store_with_metadata( + self, + response: metadata_store.MetadataStore, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[metadata_store.MetadataStore, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_metadata_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_metadata_store_with_metadata` + interceptor in new development instead of the `post_get_metadata_store` interceptor. + When both interceptors are used, this `post_get_metadata_store_with_metadata` interceptor runs after the + `post_get_metadata_store` interceptor. The (possibly modified) response returned by + `post_get_metadata_store` will be passed to + `post_get_metadata_store_with_metadata`. + """ + return response, metadata + def pre_list_artifacts( self, request: metadata_service.ListArtifactsRequest, @@ -795,12 +1201,37 @@ def post_list_artifacts( ) -> metadata_service.ListArtifactsResponse: """Post-rpc interceptor for list_artifacts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_artifacts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_artifacts` interceptor runs + before the `post_list_artifacts_with_metadata` interceptor. 
""" return response + def post_list_artifacts_with_metadata( + self, + response: metadata_service.ListArtifactsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListArtifactsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_artifacts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_artifacts_with_metadata` + interceptor in new development instead of the `post_list_artifacts` interceptor. + When both interceptors are used, this `post_list_artifacts_with_metadata` interceptor runs after the + `post_list_artifacts` interceptor. The (possibly modified) response returned by + `post_list_artifacts` will be passed to + `post_list_artifacts_with_metadata`. + """ + return response, metadata + def pre_list_contexts( self, request: metadata_service.ListContextsRequest, @@ -820,12 +1251,37 @@ def post_list_contexts( ) -> metadata_service.ListContextsResponse: """Post-rpc interceptor for list_contexts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_contexts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_contexts` interceptor runs + before the `post_list_contexts_with_metadata` interceptor. 
""" return response + def post_list_contexts_with_metadata( + self, + response: metadata_service.ListContextsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListContextsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_contexts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_contexts_with_metadata` + interceptor in new development instead of the `post_list_contexts` interceptor. + When both interceptors are used, this `post_list_contexts_with_metadata` interceptor runs after the + `post_list_contexts` interceptor. The (possibly modified) response returned by + `post_list_contexts` will be passed to + `post_list_contexts_with_metadata`. + """ + return response, metadata + def pre_list_executions( self, request: metadata_service.ListExecutionsRequest, @@ -845,12 +1301,37 @@ def post_list_executions( ) -> metadata_service.ListExecutionsResponse: """Post-rpc interceptor for list_executions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_executions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_executions` interceptor runs + before the `post_list_executions_with_metadata` interceptor. 
""" return response + def post_list_executions_with_metadata( + self, + response: metadata_service.ListExecutionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListExecutionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_executions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_executions_with_metadata` + interceptor in new development instead of the `post_list_executions` interceptor. + When both interceptors are used, this `post_list_executions_with_metadata` interceptor runs after the + `post_list_executions` interceptor. The (possibly modified) response returned by + `post_list_executions` will be passed to + `post_list_executions_with_metadata`. + """ + return response, metadata + def pre_list_metadata_schemas( self, request: metadata_service.ListMetadataSchemasRequest, @@ -871,12 +1352,38 @@ def post_list_metadata_schemas( ) -> metadata_service.ListMetadataSchemasResponse: """Post-rpc interceptor for list_metadata_schemas - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_metadata_schemas_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_metadata_schemas` interceptor runs + before the `post_list_metadata_schemas_with_metadata` interceptor. 
""" return response + def post_list_metadata_schemas_with_metadata( + self, + response: metadata_service.ListMetadataSchemasResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListMetadataSchemasResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_metadata_schemas + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_metadata_schemas_with_metadata` + interceptor in new development instead of the `post_list_metadata_schemas` interceptor. + When both interceptors are used, this `post_list_metadata_schemas_with_metadata` interceptor runs after the + `post_list_metadata_schemas` interceptor. The (possibly modified) response returned by + `post_list_metadata_schemas` will be passed to + `post_list_metadata_schemas_with_metadata`. + """ + return response, metadata + def pre_list_metadata_stores( self, request: metadata_service.ListMetadataStoresRequest, @@ -897,12 +1404,38 @@ def post_list_metadata_stores( ) -> metadata_service.ListMetadataStoresResponse: """Post-rpc interceptor for list_metadata_stores - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_metadata_stores_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_metadata_stores` interceptor runs + before the `post_list_metadata_stores_with_metadata` interceptor. 
""" return response + def post_list_metadata_stores_with_metadata( + self, + response: metadata_service.ListMetadataStoresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListMetadataStoresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_metadata_stores + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_metadata_stores_with_metadata` + interceptor in new development instead of the `post_list_metadata_stores` interceptor. + When both interceptors are used, this `post_list_metadata_stores_with_metadata` interceptor runs after the + `post_list_metadata_stores` interceptor. The (possibly modified) response returned by + `post_list_metadata_stores` will be passed to + `post_list_metadata_stores_with_metadata`. + """ + return response, metadata + def pre_purge_artifacts( self, request: metadata_service.PurgeArtifactsRequest, @@ -922,11 +1455,34 @@ def post_purge_artifacts( ) -> operations_pb2.Operation: """Post-rpc interceptor for purge_artifacts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_purge_artifacts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_purge_artifacts` interceptor runs + before the `post_purge_artifacts_with_metadata` interceptor. 
+ """ + return response + + def post_purge_artifacts_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for purge_artifacts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_purge_artifacts_with_metadata` + interceptor in new development instead of the `post_purge_artifacts` interceptor. + When both interceptors are used, this `post_purge_artifacts_with_metadata` interceptor runs after the + `post_purge_artifacts` interceptor. The (possibly modified) response returned by + `post_purge_artifacts` will be passed to + `post_purge_artifacts_with_metadata`. """ - return response + return response, metadata def pre_purge_contexts( self, @@ -947,12 +1503,35 @@ def post_purge_contexts( ) -> operations_pb2.Operation: """Post-rpc interceptor for purge_contexts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_purge_contexts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_purge_contexts` interceptor runs + before the `post_purge_contexts_with_metadata` interceptor. """ return response + def post_purge_contexts_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for purge_contexts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. 
+ + We recommend only using this `post_purge_contexts_with_metadata` + interceptor in new development instead of the `post_purge_contexts` interceptor. + When both interceptors are used, this `post_purge_contexts_with_metadata` interceptor runs after the + `post_purge_contexts` interceptor. The (possibly modified) response returned by + `post_purge_contexts` will be passed to + `post_purge_contexts_with_metadata`. + """ + return response, metadata + def pre_purge_executions( self, request: metadata_service.PurgeExecutionsRequest, @@ -972,12 +1551,35 @@ def post_purge_executions( ) -> operations_pb2.Operation: """Post-rpc interceptor for purge_executions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_purge_executions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_purge_executions` interceptor runs + before the `post_purge_executions_with_metadata` interceptor. """ return response + def post_purge_executions_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for purge_executions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_purge_executions_with_metadata` + interceptor in new development instead of the `post_purge_executions` interceptor. + When both interceptors are used, this `post_purge_executions_with_metadata` interceptor runs after the + `post_purge_executions` interceptor. The (possibly modified) response returned by + `post_purge_executions` will be passed to + `post_purge_executions_with_metadata`. 
+ """ + return response, metadata + def pre_query_artifact_lineage_subgraph( self, request: metadata_service.QueryArtifactLineageSubgraphRequest, @@ -998,12 +1600,37 @@ def post_query_artifact_lineage_subgraph( ) -> lineage_subgraph.LineageSubgraph: """Post-rpc interceptor for query_artifact_lineage_subgraph - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_artifact_lineage_subgraph_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_query_artifact_lineage_subgraph` interceptor runs + before the `post_query_artifact_lineage_subgraph_with_metadata` interceptor. """ return response + def post_query_artifact_lineage_subgraph_with_metadata( + self, + response: lineage_subgraph.LineageSubgraph, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + lineage_subgraph.LineageSubgraph, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for query_artifact_lineage_subgraph + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_query_artifact_lineage_subgraph_with_metadata` + interceptor in new development instead of the `post_query_artifact_lineage_subgraph` interceptor. + When both interceptors are used, this `post_query_artifact_lineage_subgraph_with_metadata` interceptor runs after the + `post_query_artifact_lineage_subgraph` interceptor. The (possibly modified) response returned by + `post_query_artifact_lineage_subgraph` will be passed to + `post_query_artifact_lineage_subgraph_with_metadata`. 
+ """ + return response, metadata + def pre_query_context_lineage_subgraph( self, request: metadata_service.QueryContextLineageSubgraphRequest, @@ -1024,12 +1651,37 @@ def post_query_context_lineage_subgraph( ) -> lineage_subgraph.LineageSubgraph: """Post-rpc interceptor for query_context_lineage_subgraph - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_context_lineage_subgraph_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_query_context_lineage_subgraph` interceptor runs + before the `post_query_context_lineage_subgraph_with_metadata` interceptor. """ return response + def post_query_context_lineage_subgraph_with_metadata( + self, + response: lineage_subgraph.LineageSubgraph, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + lineage_subgraph.LineageSubgraph, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for query_context_lineage_subgraph + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_query_context_lineage_subgraph_with_metadata` + interceptor in new development instead of the `post_query_context_lineage_subgraph` interceptor. + When both interceptors are used, this `post_query_context_lineage_subgraph_with_metadata` interceptor runs after the + `post_query_context_lineage_subgraph` interceptor. The (possibly modified) response returned by + `post_query_context_lineage_subgraph` will be passed to + `post_query_context_lineage_subgraph_with_metadata`. 
+ """ + return response, metadata + def pre_query_execution_inputs_and_outputs( self, request: metadata_service.QueryExecutionInputsAndOutputsRequest, @@ -1050,12 +1702,37 @@ def post_query_execution_inputs_and_outputs( ) -> lineage_subgraph.LineageSubgraph: """Post-rpc interceptor for query_execution_inputs_and_outputs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_execution_inputs_and_outputs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_query_execution_inputs_and_outputs` interceptor runs + before the `post_query_execution_inputs_and_outputs_with_metadata` interceptor. """ return response + def post_query_execution_inputs_and_outputs_with_metadata( + self, + response: lineage_subgraph.LineageSubgraph, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + lineage_subgraph.LineageSubgraph, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for query_execution_inputs_and_outputs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_query_execution_inputs_and_outputs_with_metadata` + interceptor in new development instead of the `post_query_execution_inputs_and_outputs` interceptor. + When both interceptors are used, this `post_query_execution_inputs_and_outputs_with_metadata` interceptor runs after the + `post_query_execution_inputs_and_outputs` interceptor. The (possibly modified) response returned by + `post_query_execution_inputs_and_outputs` will be passed to + `post_query_execution_inputs_and_outputs_with_metadata`. 
+ """ + return response, metadata + def pre_remove_context_children( self, request: metadata_service.RemoveContextChildrenRequest, @@ -1076,12 +1753,38 @@ def post_remove_context_children( ) -> metadata_service.RemoveContextChildrenResponse: """Post-rpc interceptor for remove_context_children - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_remove_context_children_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_remove_context_children` interceptor runs + before the `post_remove_context_children_with_metadata` interceptor. """ return response + def post_remove_context_children_with_metadata( + self, + response: metadata_service.RemoveContextChildrenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.RemoveContextChildrenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for remove_context_children + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_remove_context_children_with_metadata` + interceptor in new development instead of the `post_remove_context_children` interceptor. + When both interceptors are used, this `post_remove_context_children_with_metadata` interceptor runs after the + `post_remove_context_children` interceptor. The (possibly modified) response returned by + `post_remove_context_children` will be passed to + `post_remove_context_children_with_metadata`. 
+ """ + return response, metadata + def pre_update_artifact( self, request: metadata_service.UpdateArtifactRequest, @@ -1101,12 +1804,35 @@ def post_update_artifact( ) -> gca_artifact.Artifact: """Post-rpc interceptor for update_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_update_artifact` interceptor runs + before the `post_update_artifact_with_metadata` interceptor. """ return response + def post_update_artifact_with_metadata( + self, + response: gca_artifact.Artifact, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_artifact.Artifact, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_update_artifact_with_metadata` + interceptor in new development instead of the `post_update_artifact` interceptor. + When both interceptors are used, this `post_update_artifact_with_metadata` interceptor runs after the + `post_update_artifact` interceptor. The (possibly modified) response returned by + `post_update_artifact` will be passed to + `post_update_artifact_with_metadata`. + """ + return response, metadata + def pre_update_context( self, request: metadata_service.UpdateContextRequest, @@ -1124,12 +1850,35 @@ def pre_update_context( def post_update_context(self, response: gca_context.Context) -> gca_context.Context: """Post-rpc interceptor for update_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_context_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_update_context` interceptor runs + before the `post_update_context_with_metadata` interceptor. """ return response + def post_update_context_with_metadata( + self, + response: gca_context.Context, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_context.Context, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_update_context_with_metadata` + interceptor in new development instead of the `post_update_context` interceptor. + When both interceptors are used, this `post_update_context_with_metadata` interceptor runs after the + `post_update_context` interceptor. The (possibly modified) response returned by + `post_update_context` will be passed to + `post_update_context_with_metadata`. + """ + return response, metadata + def pre_update_execution( self, request: metadata_service.UpdateExecutionRequest, @@ -1149,12 +1898,35 @@ def post_update_execution( ) -> gca_execution.Execution: """Post-rpc interceptor for update_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_update_execution` interceptor runs + before the `post_update_execution_with_metadata` interceptor. 
""" return response + def post_update_execution_with_metadata( + self, + response: gca_execution.Execution, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_execution.Execution, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_update_execution_with_metadata` + interceptor in new development instead of the `post_update_execution` interceptor. + When both interceptors are used, this `post_update_execution_with_metadata` interceptor runs after the + `post_update_execution` interceptor. The (possibly modified) response returned by + `post_update_execution` will be passed to + `post_update_execution_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1444,9 +2216,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1657,6 +2430,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1793,6 +2570,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1801,6 +2582,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1841,6 +2630,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2003,6 +2796,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2031,6 +2828,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2143,6 +2944,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2151,6 +2956,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2211,6 +3024,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2373,6 +3190,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2413,6 +3234,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2525,6 +3350,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2533,6 +3362,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2593,6 +3430,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2751,6 +3592,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2791,6 +3636,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2903,6 +3752,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2923,6 +3780,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2971,6 +3832,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3137,6 +4002,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3169,6 +4038,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3277,6 +4150,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3285,6 +4162,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3345,6 +4230,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3429,11 +4318,10 @@ def __call__( _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_add_context_artifacts_and_executions( - request, metadata + request, metadata = ( + self._interceptor.pre_add_context_artifacts_and_executions( + request, metadata + ) ) transcoded_request = _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_transcoded_request( http_options, request @@ -3498,6 +4386,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_add_context_artifacts_and_executions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_add_context_artifacts_and_executions_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3653,6 +4547,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_add_context_children(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_add_context_children_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3808,6 +4706,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_add_execution_events(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_add_execution_events_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3958,6 +4860,10 @@ def __call__( json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_create_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4106,6 +5012,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4256,6 +5166,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4407,6 +5321,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_metadata_schema(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_metadata_schema_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4521,7 +5439,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4561,6 +5479,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = 
self._interceptor.post_create_metadata_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_metadata_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4665,7 +5587,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4704,6 +5626,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4808,7 +5734,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4847,6 +5773,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4953,7 +5883,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4992,6 +5922,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = 
self._interceptor.post_delete_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5099,7 +6033,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5138,6 +6072,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_metadata_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_metadata_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5280,6 +6218,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5422,6 +6364,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5564,6 +6510,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_execution(resp) + response_metadata = [(k, str(v)) for 
k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5709,6 +6659,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_metadata_schema(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_metadata_schema_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5856,6 +6810,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_metadata_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_metadata_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6000,6 +6958,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_artifacts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_artifacts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6146,6 +7108,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_contexts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_contexts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6292,6 +7258,10 @@ def __call__( json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_list_executions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_executions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6441,6 +7411,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_metadata_schemas(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_metadata_schemas_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6590,6 +7564,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_metadata_stores(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_metadata_stores_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6701,7 +7679,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6741,6 +7719,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_purge_artifacts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_purge_artifacts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6850,7 +7832,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + 
request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -6890,6 +7872,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_purge_contexts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_purge_contexts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7001,7 +7987,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -7041,6 +8027,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_purge_executions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_purge_executions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7190,6 +8180,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_query_artifact_lineage_subgraph(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_query_artifact_lineage_subgraph_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7343,6 +8339,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_query_context_lineage_subgraph(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_query_context_lineage_subgraph_with_metadata( + resp, response_metadata + ) + ) 
if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7433,11 +8435,10 @@ def __call__( _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_query_execution_inputs_and_outputs( - request, metadata + request, metadata = ( + self._interceptor.pre_query_execution_inputs_and_outputs( + request, metadata + ) ) transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_transcoded_request( http_options, request @@ -7497,6 +8498,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_query_execution_inputs_and_outputs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_query_execution_inputs_and_outputs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7654,6 +8661,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_remove_context_children(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_remove_context_children_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7804,6 +8815,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -7952,6 +8967,10 @@ def __call__( json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_update_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -8102,6 +9121,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -8467,7 +9490,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -8609,7 +9631,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -8751,7 +9772,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -8894,7 +9914,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -9042,7 +10061,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. 
Args: @@ -9186,7 +10204,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -9302,7 +10319,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -9418,7 +10434,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -9560,7 +10575,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -9702,7 +10716,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py index c4cd370ad3..32b85ae2a4 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -91,6 +91,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncMetadataServiceRestInterceptor: """Asynchronous Interceptor for MetadataService. @@ -389,12 +392,38 @@ async def post_add_context_artifacts_and_executions( ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: """Post-rpc interceptor for add_context_artifacts_and_executions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_add_context_artifacts_and_executions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_add_context_artifacts_and_executions` interceptor runs + before the `post_add_context_artifacts_and_executions_with_metadata` interceptor. """ return response + async def post_add_context_artifacts_and_executions_with_metadata( + self, + response: metadata_service.AddContextArtifactsAndExecutionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_context_artifacts_and_executions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. 
+ + We recommend only using this `post_add_context_artifacts_and_executions_with_metadata` + interceptor in new development instead of the `post_add_context_artifacts_and_executions` interceptor. + When both interceptors are used, this `post_add_context_artifacts_and_executions_with_metadata` interceptor runs after the + `post_add_context_artifacts_and_executions` interceptor. The (possibly modified) response returned by + `post_add_context_artifacts_and_executions` will be passed to + `post_add_context_artifacts_and_executions_with_metadata`. + """ + return response, metadata + async def pre_add_context_children( self, request: metadata_service.AddContextChildrenRequest, @@ -415,12 +444,38 @@ async def post_add_context_children( ) -> metadata_service.AddContextChildrenResponse: """Post-rpc interceptor for add_context_children - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_add_context_children_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_add_context_children` interceptor runs + before the `post_add_context_children_with_metadata` interceptor. """ return response + async def post_add_context_children_with_metadata( + self, + response: metadata_service.AddContextChildrenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.AddContextChildrenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_context_children + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_add_context_children_with_metadata` + interceptor in new development instead of the `post_add_context_children` interceptor. 
+ When both interceptors are used, this `post_add_context_children_with_metadata` interceptor runs after the + `post_add_context_children` interceptor. The (possibly modified) response returned by + `post_add_context_children` will be passed to + `post_add_context_children_with_metadata`. + """ + return response, metadata + async def pre_add_execution_events( self, request: metadata_service.AddExecutionEventsRequest, @@ -441,12 +496,38 @@ async def post_add_execution_events( ) -> metadata_service.AddExecutionEventsResponse: """Post-rpc interceptor for add_execution_events - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_add_execution_events_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_add_execution_events` interceptor runs + before the `post_add_execution_events_with_metadata` interceptor. """ return response + async def post_add_execution_events_with_metadata( + self, + response: metadata_service.AddExecutionEventsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.AddExecutionEventsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_execution_events + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_add_execution_events_with_metadata` + interceptor in new development instead of the `post_add_execution_events` interceptor. + When both interceptors are used, this `post_add_execution_events_with_metadata` interceptor runs after the + `post_add_execution_events` interceptor. 
The (possibly modified) response returned by + `post_add_execution_events` will be passed to + `post_add_execution_events_with_metadata`. + """ + return response, metadata + async def pre_create_artifact( self, request: metadata_service.CreateArtifactRequest, @@ -466,12 +547,35 @@ async def post_create_artifact( ) -> gca_artifact.Artifact: """Post-rpc interceptor for create_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_artifact` interceptor runs + before the `post_create_artifact_with_metadata` interceptor. """ return response + async def post_create_artifact_with_metadata( + self, + response: gca_artifact.Artifact, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_artifact.Artifact, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_artifact_with_metadata` + interceptor in new development instead of the `post_create_artifact` interceptor. + When both interceptors are used, this `post_create_artifact_with_metadata` interceptor runs after the + `post_create_artifact` interceptor. The (possibly modified) response returned by + `post_create_artifact` will be passed to + `post_create_artifact_with_metadata`. 
+ """ + return response, metadata + async def pre_create_context( self, request: metadata_service.CreateContextRequest, @@ -491,12 +595,35 @@ async def post_create_context( ) -> gca_context.Context: """Post-rpc interceptor for create_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_context_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_context` interceptor runs + before the `post_create_context_with_metadata` interceptor. """ return response + async def post_create_context_with_metadata( + self, + response: gca_context.Context, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_context.Context, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_context_with_metadata` + interceptor in new development instead of the `post_create_context` interceptor. + When both interceptors are used, this `post_create_context_with_metadata` interceptor runs after the + `post_create_context` interceptor. The (possibly modified) response returned by + `post_create_context` will be passed to + `post_create_context_with_metadata`. + """ + return response, metadata + async def pre_create_execution( self, request: metadata_service.CreateExecutionRequest, @@ -516,12 +643,35 @@ async def post_create_execution( ) -> gca_execution.Execution: """Post-rpc interceptor for create_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_execution_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_execution` interceptor runs + before the `post_create_execution_with_metadata` interceptor. """ return response + async def post_create_execution_with_metadata( + self, + response: gca_execution.Execution, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_execution.Execution, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_execution_with_metadata` + interceptor in new development instead of the `post_create_execution` interceptor. + When both interceptors are used, this `post_create_execution_with_metadata` interceptor runs after the + `post_create_execution` interceptor. The (possibly modified) response returned by + `post_create_execution` will be passed to + `post_create_execution_with_metadata`. + """ + return response, metadata + async def pre_create_metadata_schema( self, request: metadata_service.CreateMetadataSchemaRequest, @@ -542,12 +692,37 @@ async def post_create_metadata_schema( ) -> gca_metadata_schema.MetadataSchema: """Post-rpc interceptor for create_metadata_schema - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_metadata_schema_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_metadata_schema` interceptor runs + before the `post_create_metadata_schema_with_metadata` interceptor. 
""" return response + async def post_create_metadata_schema_with_metadata( + self, + response: gca_metadata_schema.MetadataSchema, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_metadata_schema.MetadataSchema, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_metadata_schema + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_metadata_schema_with_metadata` + interceptor in new development instead of the `post_create_metadata_schema` interceptor. + When both interceptors are used, this `post_create_metadata_schema_with_metadata` interceptor runs after the + `post_create_metadata_schema` interceptor. The (possibly modified) response returned by + `post_create_metadata_schema` will be passed to + `post_create_metadata_schema_with_metadata`. + """ + return response, metadata + async def pre_create_metadata_store( self, request: metadata_service.CreateMetadataStoreRequest, @@ -568,12 +743,35 @@ async def post_create_metadata_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_metadata_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_metadata_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_create_metadata_store` interceptor runs + before the `post_create_metadata_store_with_metadata` interceptor. 
""" return response + async def post_create_metadata_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_metadata_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_create_metadata_store_with_metadata` + interceptor in new development instead of the `post_create_metadata_store` interceptor. + When both interceptors are used, this `post_create_metadata_store_with_metadata` interceptor runs after the + `post_create_metadata_store` interceptor. The (possibly modified) response returned by + `post_create_metadata_store` will be passed to + `post_create_metadata_store_with_metadata`. + """ + return response, metadata + async def pre_delete_artifact( self, request: metadata_service.DeleteArtifactRequest, @@ -593,12 +791,35 @@ async def post_delete_artifact( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_artifact` interceptor runs + before the `post_delete_artifact_with_metadata` interceptor. 
""" return response + async def post_delete_artifact_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_artifact_with_metadata` + interceptor in new development instead of the `post_delete_artifact` interceptor. + When both interceptors are used, this `post_delete_artifact_with_metadata` interceptor runs after the + `post_delete_artifact` interceptor. The (possibly modified) response returned by + `post_delete_artifact` will be passed to + `post_delete_artifact_with_metadata`. + """ + return response, metadata + async def pre_delete_context( self, request: metadata_service.DeleteContextRequest, @@ -618,12 +839,35 @@ async def post_delete_context( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_context_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_context` interceptor runs + before the `post_delete_context_with_metadata` interceptor. 
""" return response + async def post_delete_context_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_context_with_metadata` + interceptor in new development instead of the `post_delete_context` interceptor. + When both interceptors are used, this `post_delete_context_with_metadata` interceptor runs after the + `post_delete_context` interceptor. The (possibly modified) response returned by + `post_delete_context` will be passed to + `post_delete_context_with_metadata`. + """ + return response, metadata + async def pre_delete_execution( self, request: metadata_service.DeleteExecutionRequest, @@ -643,12 +887,35 @@ async def post_delete_execution( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_execution` interceptor runs + before the `post_delete_execution_with_metadata` interceptor. 
""" return response + async def post_delete_execution_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_execution_with_metadata` + interceptor in new development instead of the `post_delete_execution` interceptor. + When both interceptors are used, this `post_delete_execution_with_metadata` interceptor runs after the + `post_delete_execution` interceptor. The (possibly modified) response returned by + `post_delete_execution` will be passed to + `post_delete_execution_with_metadata`. + """ + return response, metadata + async def pre_delete_metadata_store( self, request: metadata_service.DeleteMetadataStoreRequest, @@ -669,12 +936,35 @@ async def post_delete_metadata_store( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_metadata_store - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_metadata_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_delete_metadata_store` interceptor runs + before the `post_delete_metadata_store_with_metadata` interceptor. 
""" return response + async def post_delete_metadata_store_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_metadata_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_delete_metadata_store_with_metadata` + interceptor in new development instead of the `post_delete_metadata_store` interceptor. + When both interceptors are used, this `post_delete_metadata_store_with_metadata` interceptor runs after the + `post_delete_metadata_store` interceptor. The (possibly modified) response returned by + `post_delete_metadata_store` will be passed to + `post_delete_metadata_store_with_metadata`. + """ + return response, metadata + async def pre_get_artifact( self, request: metadata_service.GetArtifactRequest, @@ -692,12 +982,35 @@ async def pre_get_artifact( async def post_get_artifact(self, response: artifact.Artifact) -> artifact.Artifact: """Post-rpc interceptor for get_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_artifact` interceptor runs + before the `post_get_artifact_with_metadata` interceptor. 
""" return response + async def post_get_artifact_with_metadata( + self, + response: artifact.Artifact, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[artifact.Artifact, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_artifact_with_metadata` + interceptor in new development instead of the `post_get_artifact` interceptor. + When both interceptors are used, this `post_get_artifact_with_metadata` interceptor runs after the + `post_get_artifact` interceptor. The (possibly modified) response returned by + `post_get_artifact` will be passed to + `post_get_artifact_with_metadata`. + """ + return response, metadata + async def pre_get_context( self, request: metadata_service.GetContextRequest, @@ -715,12 +1028,35 @@ async def pre_get_context( async def post_get_context(self, response: context.Context) -> context.Context: """Post-rpc interceptor for get_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_context_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_context` interceptor runs + before the `post_get_context_with_metadata` interceptor. """ return response + async def post_get_context_with_metadata( + self, + response: context.Context, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[context.Context, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. 
+ + We recommend only using this `post_get_context_with_metadata` + interceptor in new development instead of the `post_get_context` interceptor. + When both interceptors are used, this `post_get_context_with_metadata` interceptor runs after the + `post_get_context` interceptor. The (possibly modified) response returned by + `post_get_context` will be passed to + `post_get_context_with_metadata`. + """ + return response, metadata + async def pre_get_execution( self, request: metadata_service.GetExecutionRequest, @@ -740,12 +1076,35 @@ async def post_get_execution( ) -> execution.Execution: """Post-rpc interceptor for get_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_execution` interceptor runs + before the `post_get_execution_with_metadata` interceptor. """ return response + async def post_get_execution_with_metadata( + self, + response: execution.Execution, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[execution.Execution, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_execution_with_metadata` + interceptor in new development instead of the `post_get_execution` interceptor. + When both interceptors are used, this `post_get_execution_with_metadata` interceptor runs after the + `post_get_execution` interceptor. The (possibly modified) response returned by + `post_get_execution` will be passed to + `post_get_execution_with_metadata`. 
+ """ + return response, metadata + async def pre_get_metadata_schema( self, request: metadata_service.GetMetadataSchemaRequest, @@ -766,12 +1125,35 @@ async def post_get_metadata_schema( ) -> metadata_schema.MetadataSchema: """Post-rpc interceptor for get_metadata_schema - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_metadata_schema_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_metadata_schema` interceptor runs + before the `post_get_metadata_schema_with_metadata` interceptor. """ return response + async def post_get_metadata_schema_with_metadata( + self, + response: metadata_schema.MetadataSchema, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[metadata_schema.MetadataSchema, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_metadata_schema + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_metadata_schema_with_metadata` + interceptor in new development instead of the `post_get_metadata_schema` interceptor. + When both interceptors are used, this `post_get_metadata_schema_with_metadata` interceptor runs after the + `post_get_metadata_schema` interceptor. The (possibly modified) response returned by + `post_get_metadata_schema` will be passed to + `post_get_metadata_schema_with_metadata`. + """ + return response, metadata + async def pre_get_metadata_store( self, request: metadata_service.GetMetadataStoreRequest, @@ -792,12 +1174,35 @@ async def post_get_metadata_store( ) -> metadata_store.MetadataStore: """Post-rpc interceptor for get_metadata_store - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_metadata_store_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_get_metadata_store` interceptor runs + before the `post_get_metadata_store_with_metadata` interceptor. """ return response + async def post_get_metadata_store_with_metadata( + self, + response: metadata_store.MetadataStore, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[metadata_store.MetadataStore, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_metadata_store + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_get_metadata_store_with_metadata` + interceptor in new development instead of the `post_get_metadata_store` interceptor. + When both interceptors are used, this `post_get_metadata_store_with_metadata` interceptor runs after the + `post_get_metadata_store` interceptor. The (possibly modified) response returned by + `post_get_metadata_store` will be passed to + `post_get_metadata_store_with_metadata`. + """ + return response, metadata + async def pre_list_artifacts( self, request: metadata_service.ListArtifactsRequest, @@ -817,12 +1222,37 @@ async def post_list_artifacts( ) -> metadata_service.ListArtifactsResponse: """Post-rpc interceptor for list_artifacts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_artifacts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_artifacts` interceptor runs + before the `post_list_artifacts_with_metadata` interceptor. 
""" return response + async def post_list_artifacts_with_metadata( + self, + response: metadata_service.ListArtifactsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListArtifactsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_artifacts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_artifacts_with_metadata` + interceptor in new development instead of the `post_list_artifacts` interceptor. + When both interceptors are used, this `post_list_artifacts_with_metadata` interceptor runs after the + `post_list_artifacts` interceptor. The (possibly modified) response returned by + `post_list_artifacts` will be passed to + `post_list_artifacts_with_metadata`. + """ + return response, metadata + async def pre_list_contexts( self, request: metadata_service.ListContextsRequest, @@ -842,12 +1272,37 @@ async def post_list_contexts( ) -> metadata_service.ListContextsResponse: """Post-rpc interceptor for list_contexts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_contexts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_contexts` interceptor runs + before the `post_list_contexts_with_metadata` interceptor. 
""" return response + async def post_list_contexts_with_metadata( + self, + response: metadata_service.ListContextsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListContextsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_contexts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_contexts_with_metadata` + interceptor in new development instead of the `post_list_contexts` interceptor. + When both interceptors are used, this `post_list_contexts_with_metadata` interceptor runs after the + `post_list_contexts` interceptor. The (possibly modified) response returned by + `post_list_contexts` will be passed to + `post_list_contexts_with_metadata`. + """ + return response, metadata + async def pre_list_executions( self, request: metadata_service.ListExecutionsRequest, @@ -867,12 +1322,37 @@ async def post_list_executions( ) -> metadata_service.ListExecutionsResponse: """Post-rpc interceptor for list_executions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_executions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_executions` interceptor runs + before the `post_list_executions_with_metadata` interceptor. 
""" return response + async def post_list_executions_with_metadata( + self, + response: metadata_service.ListExecutionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListExecutionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_executions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_executions_with_metadata` + interceptor in new development instead of the `post_list_executions` interceptor. + When both interceptors are used, this `post_list_executions_with_metadata` interceptor runs after the + `post_list_executions` interceptor. The (possibly modified) response returned by + `post_list_executions` will be passed to + `post_list_executions_with_metadata`. + """ + return response, metadata + async def pre_list_metadata_schemas( self, request: metadata_service.ListMetadataSchemasRequest, @@ -893,12 +1373,38 @@ async def post_list_metadata_schemas( ) -> metadata_service.ListMetadataSchemasResponse: """Post-rpc interceptor for list_metadata_schemas - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_metadata_schemas_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_metadata_schemas` interceptor runs + before the `post_list_metadata_schemas_with_metadata` interceptor. 
""" return response + async def post_list_metadata_schemas_with_metadata( + self, + response: metadata_service.ListMetadataSchemasResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListMetadataSchemasResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_metadata_schemas + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_metadata_schemas_with_metadata` + interceptor in new development instead of the `post_list_metadata_schemas` interceptor. + When both interceptors are used, this `post_list_metadata_schemas_with_metadata` interceptor runs after the + `post_list_metadata_schemas` interceptor. The (possibly modified) response returned by + `post_list_metadata_schemas` will be passed to + `post_list_metadata_schemas_with_metadata`. + """ + return response, metadata + async def pre_list_metadata_stores( self, request: metadata_service.ListMetadataStoresRequest, @@ -919,12 +1425,38 @@ async def post_list_metadata_stores( ) -> metadata_service.ListMetadataStoresResponse: """Post-rpc interceptor for list_metadata_stores - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_metadata_stores_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_list_metadata_stores` interceptor runs + before the `post_list_metadata_stores_with_metadata` interceptor. 
""" return response + async def post_list_metadata_stores_with_metadata( + self, + response: metadata_service.ListMetadataStoresResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.ListMetadataStoresResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_metadata_stores + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_list_metadata_stores_with_metadata` + interceptor in new development instead of the `post_list_metadata_stores` interceptor. + When both interceptors are used, this `post_list_metadata_stores_with_metadata` interceptor runs after the + `post_list_metadata_stores` interceptor. The (possibly modified) response returned by + `post_list_metadata_stores` will be passed to + `post_list_metadata_stores_with_metadata`. + """ + return response, metadata + async def pre_purge_artifacts( self, request: metadata_service.PurgeArtifactsRequest, @@ -944,12 +1476,35 @@ async def post_purge_artifacts( ) -> operations_pb2.Operation: """Post-rpc interceptor for purge_artifacts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_purge_artifacts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_purge_artifacts` interceptor runs + before the `post_purge_artifacts_with_metadata` interceptor. 
""" return response + async def post_purge_artifacts_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for purge_artifacts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_purge_artifacts_with_metadata` + interceptor in new development instead of the `post_purge_artifacts` interceptor. + When both interceptors are used, this `post_purge_artifacts_with_metadata` interceptor runs after the + `post_purge_artifacts` interceptor. The (possibly modified) response returned by + `post_purge_artifacts` will be passed to + `post_purge_artifacts_with_metadata`. + """ + return response, metadata + async def pre_purge_contexts( self, request: metadata_service.PurgeContextsRequest, @@ -969,11 +1524,34 @@ async def post_purge_contexts( ) -> operations_pb2.Operation: """Post-rpc interceptor for purge_contexts - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_purge_contexts_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_purge_contexts` interceptor runs + before the `post_purge_contexts_with_metadata` interceptor. 
+ """ + return response + + async def post_purge_contexts_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for purge_contexts + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_purge_contexts_with_metadata` + interceptor in new development instead of the `post_purge_contexts` interceptor. + When both interceptors are used, this `post_purge_contexts_with_metadata` interceptor runs after the + `post_purge_contexts` interceptor. The (possibly modified) response returned by + `post_purge_contexts` will be passed to + `post_purge_contexts_with_metadata`. """ - return response + return response, metadata async def pre_purge_executions( self, @@ -994,12 +1572,35 @@ async def post_purge_executions( ) -> operations_pb2.Operation: """Post-rpc interceptor for purge_executions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_purge_executions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_purge_executions` interceptor runs + before the `post_purge_executions_with_metadata` interceptor. 
""" return response + async def post_purge_executions_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for purge_executions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_purge_executions_with_metadata` + interceptor in new development instead of the `post_purge_executions` interceptor. + When both interceptors are used, this `post_purge_executions_with_metadata` interceptor runs after the + `post_purge_executions` interceptor. The (possibly modified) response returned by + `post_purge_executions` will be passed to + `post_purge_executions_with_metadata`. + """ + return response, metadata + async def pre_query_artifact_lineage_subgraph( self, request: metadata_service.QueryArtifactLineageSubgraphRequest, @@ -1020,12 +1621,37 @@ async def post_query_artifact_lineage_subgraph( ) -> lineage_subgraph.LineageSubgraph: """Post-rpc interceptor for query_artifact_lineage_subgraph - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_artifact_lineage_subgraph_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_query_artifact_lineage_subgraph` interceptor runs + before the `post_query_artifact_lineage_subgraph_with_metadata` interceptor. 
""" return response + async def post_query_artifact_lineage_subgraph_with_metadata( + self, + response: lineage_subgraph.LineageSubgraph, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + lineage_subgraph.LineageSubgraph, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for query_artifact_lineage_subgraph + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_query_artifact_lineage_subgraph_with_metadata` + interceptor in new development instead of the `post_query_artifact_lineage_subgraph` interceptor. + When both interceptors are used, this `post_query_artifact_lineage_subgraph_with_metadata` interceptor runs after the + `post_query_artifact_lineage_subgraph` interceptor. The (possibly modified) response returned by + `post_query_artifact_lineage_subgraph` will be passed to + `post_query_artifact_lineage_subgraph_with_metadata`. + """ + return response, metadata + async def pre_query_context_lineage_subgraph( self, request: metadata_service.QueryContextLineageSubgraphRequest, @@ -1046,12 +1672,37 @@ async def post_query_context_lineage_subgraph( ) -> lineage_subgraph.LineageSubgraph: """Post-rpc interceptor for query_context_lineage_subgraph - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_context_lineage_subgraph_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_query_context_lineage_subgraph` interceptor runs + before the `post_query_context_lineage_subgraph_with_metadata` interceptor. 
""" return response + async def post_query_context_lineage_subgraph_with_metadata( + self, + response: lineage_subgraph.LineageSubgraph, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + lineage_subgraph.LineageSubgraph, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for query_context_lineage_subgraph + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_query_context_lineage_subgraph_with_metadata` + interceptor in new development instead of the `post_query_context_lineage_subgraph` interceptor. + When both interceptors are used, this `post_query_context_lineage_subgraph_with_metadata` interceptor runs after the + `post_query_context_lineage_subgraph` interceptor. The (possibly modified) response returned by + `post_query_context_lineage_subgraph` will be passed to + `post_query_context_lineage_subgraph_with_metadata`. + """ + return response, metadata + async def pre_query_execution_inputs_and_outputs( self, request: metadata_service.QueryExecutionInputsAndOutputsRequest, @@ -1072,12 +1723,37 @@ async def post_query_execution_inputs_and_outputs( ) -> lineage_subgraph.LineageSubgraph: """Post-rpc interceptor for query_execution_inputs_and_outputs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_query_execution_inputs_and_outputs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_query_execution_inputs_and_outputs` interceptor runs + before the `post_query_execution_inputs_and_outputs_with_metadata` interceptor. 
""" return response + async def post_query_execution_inputs_and_outputs_with_metadata( + self, + response: lineage_subgraph.LineageSubgraph, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + lineage_subgraph.LineageSubgraph, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for query_execution_inputs_and_outputs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_query_execution_inputs_and_outputs_with_metadata` + interceptor in new development instead of the `post_query_execution_inputs_and_outputs` interceptor. + When both interceptors are used, this `post_query_execution_inputs_and_outputs_with_metadata` interceptor runs after the + `post_query_execution_inputs_and_outputs` interceptor. The (possibly modified) response returned by + `post_query_execution_inputs_and_outputs` will be passed to + `post_query_execution_inputs_and_outputs_with_metadata`. + """ + return response, metadata + async def pre_remove_context_children( self, request: metadata_service.RemoveContextChildrenRequest, @@ -1098,12 +1774,38 @@ async def post_remove_context_children( ) -> metadata_service.RemoveContextChildrenResponse: """Post-rpc interceptor for remove_context_children - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_remove_context_children_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_remove_context_children` interceptor runs + before the `post_remove_context_children_with_metadata` interceptor. 
""" return response + async def post_remove_context_children_with_metadata( + self, + response: metadata_service.RemoveContextChildrenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + metadata_service.RemoveContextChildrenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for remove_context_children + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_remove_context_children_with_metadata` + interceptor in new development instead of the `post_remove_context_children` interceptor. + When both interceptors are used, this `post_remove_context_children_with_metadata` interceptor runs after the + `post_remove_context_children` interceptor. The (possibly modified) response returned by + `post_remove_context_children` will be passed to + `post_remove_context_children_with_metadata`. + """ + return response, metadata + async def pre_update_artifact( self, request: metadata_service.UpdateArtifactRequest, @@ -1123,12 +1825,35 @@ async def post_update_artifact( ) -> gca_artifact.Artifact: """Post-rpc interceptor for update_artifact - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_artifact_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_update_artifact` interceptor runs + before the `post_update_artifact_with_metadata` interceptor. 
""" return response + async def post_update_artifact_with_metadata( + self, + response: gca_artifact.Artifact, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_artifact.Artifact, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_artifact + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_update_artifact_with_metadata` + interceptor in new development instead of the `post_update_artifact` interceptor. + When both interceptors are used, this `post_update_artifact_with_metadata` interceptor runs after the + `post_update_artifact` interceptor. The (possibly modified) response returned by + `post_update_artifact` will be passed to + `post_update_artifact_with_metadata`. + """ + return response, metadata + async def pre_update_context( self, request: metadata_service.UpdateContextRequest, @@ -1148,12 +1873,35 @@ async def post_update_context( ) -> gca_context.Context: """Post-rpc interceptor for update_context - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_context_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_update_context` interceptor runs + before the `post_update_context_with_metadata` interceptor. 
""" return response + async def post_update_context_with_metadata( + self, + response: gca_context.Context, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_context.Context, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_context + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_update_context_with_metadata` + interceptor in new development instead of the `post_update_context` interceptor. + When both interceptors are used, this `post_update_context_with_metadata` interceptor runs after the + `post_update_context` interceptor. The (possibly modified) response returned by + `post_update_context` will be passed to + `post_update_context_with_metadata`. + """ + return response, metadata + async def pre_update_execution( self, request: metadata_service.UpdateExecutionRequest, @@ -1173,12 +1921,35 @@ async def post_update_execution( ) -> gca_execution.Execution: """Post-rpc interceptor for update_execution - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_execution_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MetadataService server but before - it is returned to user code. + it is returned to user code. This `post_update_execution` interceptor runs + before the `post_update_execution_with_metadata` interceptor. 
""" return response + async def post_update_execution_with_metadata( + self, + response: gca_execution.Execution, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_execution.Execution, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_execution + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MetadataService server but before it is returned to user code. + + We recommend only using this `post_update_execution_with_metadata` + interceptor in new development instead of the `post_update_execution` interceptor. + When both interceptors are used, this `post_update_execution_with_metadata` interceptor runs after the + `post_update_execution` interceptor. The (possibly modified) response returned by + `post_update_execution` will be passed to + `post_update_execution_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1487,9 +2258,9 @@ def __init__( self._interceptor = interceptor or AsyncMetadataServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1778,11 +2549,10 @@ async def __call__( _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_add_context_artifacts_and_executions( - request, metadata + request, metadata = ( + await self._interceptor.pre_add_context_artifacts_and_executions( + request, metadata + ) ) transcoded_request = _BaseMetadataServiceRestTransport._BaseAddContextArtifactsAndExecutions._get_transcoded_request( 
http_options, request @@ -1854,6 +2624,12 @@ async def __call__( resp = await self._interceptor.post_add_context_artifacts_and_executions( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_add_context_artifacts_and_executions_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2015,6 +2791,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_add_context_children(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_add_context_children_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2176,6 +2956,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_add_execution_events(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_add_execution_events_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2337,6 +3121,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2496,6 +3284,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await 
self._interceptor.post_create_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2655,6 +3447,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2812,6 +3608,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_metadata_schema(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_metadata_schema_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2927,7 +3727,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2974,6 +3774,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_metadata_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_metadata_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3082,7 +3886,7 @@ async def 
__call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3130,6 +3934,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3238,7 +4046,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3286,6 +4094,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3394,7 +4206,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3442,6 +4254,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_execution_with_metadata( + resp, response_metadata + ) if 
CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3550,7 +4366,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3596,6 +4412,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_metadata_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_metadata_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3748,6 +4568,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3900,6 +4724,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4053,6 +4881,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await 
self._interceptor.post_get_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4204,6 +5036,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_metadata_schema(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_metadata_schema_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4360,6 +5196,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_metadata_store(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_metadata_store_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4515,6 +5355,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_artifacts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_artifacts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4672,6 +5516,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_contexts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_contexts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG ): # pragma: NO COVER @@ -4829,6 +5677,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_executions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_executions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4984,6 +5836,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_metadata_schemas(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_metadata_schemas_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5139,6 +5995,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_metadata_stores(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_metadata_stores_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5254,7 +6114,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5303,6 +6163,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_purge_artifacts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await 
self._interceptor.post_purge_artifacts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5416,7 +6280,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5465,6 +6329,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_purge_contexts(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_purge_contexts_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5578,7 +6446,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5627,6 +6495,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_purge_executions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_purge_executions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5718,11 +6590,10 @@ async def __call__( _BaseMetadataServiceRestTransport._BaseQueryArtifactLineageSubgraph._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_query_artifact_lineage_subgraph( - request, metadata + request, metadata = ( + await self._interceptor.pre_query_artifact_lineage_subgraph( + request, metadata + ) ) transcoded_request = 
_BaseMetadataServiceRestTransport._BaseQueryArtifactLineageSubgraph._get_transcoded_request( http_options, request @@ -5787,6 +6658,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_query_artifact_lineage_subgraph(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_query_artifact_lineage_subgraph_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5878,11 +6755,10 @@ async def __call__( _BaseMetadataServiceRestTransport._BaseQueryContextLineageSubgraph._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_query_context_lineage_subgraph( - request, metadata + request, metadata = ( + await self._interceptor.pre_query_context_lineage_subgraph( + request, metadata + ) ) transcoded_request = _BaseMetadataServiceRestTransport._BaseQueryContextLineageSubgraph._get_transcoded_request( http_options, request @@ -5947,6 +6823,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_query_context_lineage_subgraph(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_query_context_lineage_subgraph_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6040,11 +6922,10 @@ async def __call__( _BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_query_execution_inputs_and_outputs( - request, metadata + request, metadata = ( + await self._interceptor.pre_query_execution_inputs_and_outputs( + request, metadata + ) ) transcoded_request = 
_BaseMetadataServiceRestTransport._BaseQueryExecutionInputsAndOutputs._get_transcoded_request( http_options, request @@ -6109,6 +6990,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_query_execution_inputs_and_outputs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_query_execution_inputs_and_outputs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6270,6 +7157,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_remove_context_children(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_remove_context_children_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6431,6 +7324,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_artifact(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_artifact_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6590,6 +7487,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_context(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_context_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG ): # pragma: NO COVER @@ -6749,6 +7650,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_execution(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_execution_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -6944,6 +7849,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -7080,6 +7989,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -7088,6 +8001,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7128,6 +8049,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -7290,6 +8215,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -7318,6 +8247,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -7430,6 +8363,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -7438,6 +8375,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7498,6 +8443,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -7660,6 +8609,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -7700,6 +8653,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -7812,6 +8769,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -7820,6 +8781,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7880,6 +8849,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -8038,6 +9011,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -8078,6 +9055,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -8190,6 +9171,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -8210,6 +9199,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -8258,6 +9251,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -8424,6 +9421,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -8456,6 +9457,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -8564,6 +9569,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -8572,6 +9581,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -8632,6 +9649,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -8928,7 +9949,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -9081,7 +10101,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -9234,7 +10253,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -9388,7 +10406,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -9546,7 +10563,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -9697,7 +10713,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -9822,7 +10837,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -9947,7 +10961,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. 
Args: @@ -10100,7 +11113,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -10253,7 +11265,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py index 97dc82fd49..43fccf6373 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -1884,6 +1884,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -1970,6 +1974,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -2048,6 +2057,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -2229,6 +2242,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -2365,6 +2382,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -2373,6 +2394,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2413,6 +2442,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -2594,6 +2627,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2622,6 +2659,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2734,6 +2775,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2742,6 +2787,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2802,6 +2855,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2983,6 +3040,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -3023,6 +3084,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -3135,6 +3200,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3143,6 +3212,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3203,6 +3280,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -3380,6 +3461,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -3420,6 +3505,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3532,6 +3621,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3552,6 +3649,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3600,6 +3701,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3785,6 +3890,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3817,6 +3926,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3925,6 +4038,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3933,6 +4050,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3993,6 +4118,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/migration_service/__init__.py b/google/cloud/aiplatform_v1/services/migration_service/__init__.py index 26e165d4d3..554bbdcfaf 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/migration_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/google/cloud/aiplatform_v1/services/migration_service/async_client.py index c6b459a6e6..efddaa0c39 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.migration_service import pagers from google.cloud.aiplatform_v1.types import migratable_resource from google.cloud.aiplatform_v1.types import migration_service @@ -53,6 +52,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import 
MigrationServiceGrpcAsyncIOTransport from .client import MigrationServiceClient @@ -135,7 +136,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: MigrationServiceAsyncClient: The constructed client. """ - return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + MigrationServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(MigrationServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -151,7 +155,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: MigrationServiceAsyncClient: The constructed client. """ - return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + MigrationServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(MigrationServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -294,21 +301,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.MigrationServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.MigrationService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.MigrationService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.MigrationService", + "universeDomain": getattr( + 
self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.MigrationService", + "credentialsType": None, + } + ), ) async def search_migratable_resources( @@ -388,7 +397,10 @@ async def sample_search_migratable_resources(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -537,7 +549,10 @@ async def sample_batch_migrate_resources(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, migrate_resource_requests]) + flattened_params = [parent, migrate_resource_requests] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1298,5 +1313,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("MigrationServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 6db106351e..074244583d 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.migration_service import pagers from google.cloud.aiplatform_v1.types import migratable_resource from google.cloud.aiplatform_v1.types import migration_service @@ -67,6 +68,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import MigrationServiceGrpcTransport from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport @@ -168,6 +171,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. 
+ + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -240,40 +271,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - 
location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -482,12 +513,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = MigrationServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -495,7 +522,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -527,20 +554,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = MigrationServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -633,6 +654,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -723,11 +771,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = MigrationServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + MigrationServiceClient._read_environment_variables() + ) self._client_cert_source = MigrationServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -840,21 +886,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.MigrationServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.MigrationService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.MigrationService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.MigrationService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.MigrationService", + "credentialsType": None, + } + ), ) def search_migratable_resources( @@ -934,7 +984,10 @@ def sample_search_migratable_resources(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1082,7 +1135,10 @@ def sample_batch_migrate_resources(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) + flattened_params = [parent, migrate_resource_requests] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1189,16 +1245,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1244,16 +1304,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1416,16 +1480,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1537,16 +1605,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1659,16 +1731,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1719,16 +1795,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1774,16 +1854,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1829,21 +1913,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("MigrationServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/google/cloud/aiplatform_v1/services/migration_service/pagers.py index 3737cd8381..359b902755 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/migration_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py index f73bf0028a..e8dfb59253 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py index d890864a6b..3e22c2cce3 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import migration_service from google.cloud.location import locations_pb2 # type: ignore @@ -37,6 +38,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class MigrationServiceTransport(abc.ABC): """Abstract transport class for MigrationService.""" @@ -68,9 +72,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
@@ -83,8 +88,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -99,11 +102,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -255,13 +263,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py index 7dfc259610..83106b1a30 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -75,12 +75,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.MigrationService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -158,9 +157,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -294,9 +294,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -369,12 +370,12 @@ def search_migratable_resources( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "search_migratable_resources" not in self._stubs: - self._stubs[ - "search_migratable_resources" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources", - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + self._stubs["search_migratable_resources"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources", + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) ) return self._stubs["search_migratable_resources"] diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py index add8ee65c1..a1720a702a 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -154,8 +154,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. 
+ credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -206,9 +207,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -376,12 +378,12 @@ def search_migratable_resources( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "search_migratable_resources" not in self._stubs: - self._stubs[ - "search_migratable_resources" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources", - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + self._stubs["search_migratable_resources"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources", + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) ) return self._stubs["search_migratable_resources"] diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py index 9693e07d92..0e57f2a19d 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -63,6 +64,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class MigrationServiceRestInterceptor: """Interceptor for MigrationService. 
@@ -121,12 +125,35 @@ def post_batch_migrate_resources( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_migrate_resources - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_migrate_resources_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MigrationService server but before - it is returned to user code. + it is returned to user code. This `post_batch_migrate_resources` interceptor runs + before the `post_batch_migrate_resources_with_metadata` interceptor. """ return response + def post_batch_migrate_resources_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_migrate_resources + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MigrationService server but before it is returned to user code. + + We recommend only using this `post_batch_migrate_resources_with_metadata` + interceptor in new development instead of the `post_batch_migrate_resources` interceptor. + When both interceptors are used, this `post_batch_migrate_resources_with_metadata` interceptor runs after the + `post_batch_migrate_resources` interceptor. The (possibly modified) response returned by + `post_batch_migrate_resources` will be passed to + `post_batch_migrate_resources_with_metadata`. + """ + return response, metadata + def pre_search_migratable_resources( self, request: migration_service.SearchMigratableResourcesRequest, @@ -147,12 +174,38 @@ def post_search_migratable_resources( ) -> migration_service.SearchMigratableResourcesResponse: """Post-rpc interceptor for search_migratable_resources - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_search_migratable_resources_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MigrationService server but before - it is returned to user code. + it is returned to user code. This `post_search_migratable_resources` interceptor runs + before the `post_search_migratable_resources_with_metadata` interceptor. """ return response + def post_search_migratable_resources_with_metadata( + self, + response: migration_service.SearchMigratableResourcesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + migration_service.SearchMigratableResourcesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_migratable_resources + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MigrationService server but before it is returned to user code. + + We recommend only using this `post_search_migratable_resources_with_metadata` + interceptor in new development instead of the `post_search_migratable_resources` interceptor. + When both interceptors are used, this `post_search_migratable_resources_with_metadata` interceptor runs after the + `post_search_migratable_resources` interceptor. The (possibly modified) response returned by + `post_search_migratable_resources` will be passed to + `post_search_migratable_resources_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -443,9 +496,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. 
This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -656,6 +710,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -792,6 +850,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -800,6 +862,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -840,6 +910,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1002,6 +1076,10 
@@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1030,6 +1108,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1142,6 +1224,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1150,6 +1236,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1210,6 +1304,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1372,6 +1470,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1412,6 +1514,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1524,6 +1630,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1532,6 +1642,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1592,6 +1710,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -1750,6 +1872,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1790,6 +1916,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1902,6 +2032,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -1922,6 +2060,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -1970,6 +2112,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2136,6 +2282,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2168,6 +2318,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2276,6 +2430,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2284,6 +2442,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2344,6 +2510,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2452,7 +2622,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2494,6 +2664,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_migrate_resources(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_migrate_resources_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2650,6 +2824,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_search_migratable_resources(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_search_migratable_resources_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2739,7 +2917,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -2881,7 +3058,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3023,7 +3199,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. 
Args: @@ -3166,7 +3341,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -3314,7 +3488,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -3459,7 +3632,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -3576,7 +3748,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -3692,7 +3863,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -3834,7 +4004,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -3976,7 +4145,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py index 70c052fa7e..f5ccc79485 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -81,6 +81,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncMigrationServiceRestInterceptor: """Asynchronous Interceptor for MigrationService. @@ -139,12 +142,35 @@ async def post_batch_migrate_resources( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_migrate_resources - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_migrate_resources_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MigrationService server but before - it is returned to user code. + it is returned to user code. This `post_batch_migrate_resources` interceptor runs + before the `post_batch_migrate_resources_with_metadata` interceptor. 
""" return response + async def post_batch_migrate_resources_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_migrate_resources + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MigrationService server but before it is returned to user code. + + We recommend only using this `post_batch_migrate_resources_with_metadata` + interceptor in new development instead of the `post_batch_migrate_resources` interceptor. + When both interceptors are used, this `post_batch_migrate_resources_with_metadata` interceptor runs after the + `post_batch_migrate_resources` interceptor. The (possibly modified) response returned by + `post_batch_migrate_resources` will be passed to + `post_batch_migrate_resources_with_metadata`. + """ + return response, metadata + async def pre_search_migratable_resources( self, request: migration_service.SearchMigratableResourcesRequest, @@ -165,12 +191,38 @@ async def post_search_migratable_resources( ) -> migration_service.SearchMigratableResourcesResponse: """Post-rpc interceptor for search_migratable_resources - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_search_migratable_resources_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the MigrationService server but before - it is returned to user code. + it is returned to user code. This `post_search_migratable_resources` interceptor runs + before the `post_search_migratable_resources_with_metadata` interceptor. 
""" return response + async def post_search_migratable_resources_with_metadata( + self, + response: migration_service.SearchMigratableResourcesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + migration_service.SearchMigratableResourcesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for search_migratable_resources + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the MigrationService server but before it is returned to user code. + + We recommend only using this `post_search_migratable_resources_with_metadata` + interceptor in new development instead of the `post_search_migratable_resources` interceptor. + When both interceptors are used, this `post_search_migratable_resources_with_metadata` interceptor runs after the + `post_search_migratable_resources` interceptor. The (possibly modified) response returned by + `post_search_migratable_resources` will be passed to + `post_search_migratable_resources_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -480,9 +532,9 @@ def __init__( self._interceptor = interceptor or AsyncMigrationServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -643,7 +695,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -690,6 +742,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_migrate_resources(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_batch_migrate_resources_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -850,6 +908,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_search_migratable_resources(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_search_migratable_resources_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1049,6 +1113,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1185,6 +1253,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1193,6 +1265,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1233,6 +1313,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1395,6 +1479,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1423,6 +1511,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1535,6 +1627,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1543,6 +1639,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1603,6 +1707,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1765,6 +1873,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1805,6 +1917,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1917,6 +2033,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1925,6 +2045,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1985,6 +2113,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2143,6 +2275,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2183,6 +2319,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2295,6 +2435,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2315,6 +2463,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2363,6 +2515,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2529,6 +2685,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2561,6 +2721,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2669,6 +2833,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2677,6 +2845,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2737,6 +2913,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2814,7 +2994,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. 
Args: @@ -2967,7 +3146,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3120,7 +3298,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -3274,7 +3451,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -3432,7 +3608,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -3583,7 +3758,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -3708,7 +3882,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -3833,7 +4006,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -3986,7 +4158,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4139,7 +4310,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py index 66d34a7bb3..bb363db174 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -328,6 +328,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -414,6 +418,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -492,6 +501,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -673,6 +686,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -809,6 +826,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -817,6 +838,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -857,6 +886,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1038,6 +1071,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1066,6 +1103,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1178,6 +1219,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1186,6 +1231,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1246,6 +1299,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1427,6 +1484,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1467,6 +1528,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1579,6 +1644,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1587,6 +1656,14 @@ def 
_get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1647,6 +1724,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1824,6 +1905,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1864,6 +1949,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1976,6 +2065,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -1996,6 +2093,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + 
"uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2044,6 +2145,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2229,6 +2334,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2261,6 +2370,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2369,6 +2482,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2377,6 +2494,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", 
@@ -2437,6 +2562,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/__init__.py b/google/cloud/aiplatform_v1/services/model_garden_service/__init__.py index a8e438066e..8c77df19bb 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py index 3c6b7fce79..b41cf9695e 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -51,6 +52,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import ModelGardenServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ModelGardenServiceGrpcAsyncIOTransport from .client import ModelGardenServiceClient @@ -77,6 +80,10 @@ class ModelGardenServiceAsyncClient: _DEFAULT_ENDPOINT_TEMPLATE = ModelGardenServiceClient._DEFAULT_ENDPOINT_TEMPLATE _DEFAULT_UNIVERSE = ModelGardenServiceClient._DEFAULT_UNIVERSE + endpoint_path = staticmethod(ModelGardenServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ModelGardenServiceClient.parse_endpoint_path) + model_path = staticmethod(ModelGardenServiceClient.model_path) + parse_model_path = staticmethod(ModelGardenServiceClient.parse_model_path) publisher_model_path = staticmethod(ModelGardenServiceClient.publisher_model_path) parse_publisher_model_path = staticmethod( ModelGardenServiceClient.parse_publisher_model_path @@ -123,7 +130,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ModelGardenServiceAsyncClient: The constructed client. 
""" - return ModelGardenServiceClient.from_service_account_info.__func__(ModelGardenServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ModelGardenServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(ModelGardenServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -139,7 +149,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ModelGardenServiceAsyncClient: The constructed client. """ - return ModelGardenServiceClient.from_service_account_file.__func__(ModelGardenServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ModelGardenServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(ModelGardenServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -284,21 +297,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": 
getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "credentialsType": None, + } + ), ) async def get_publisher_model( @@ -367,7 +382,10 @@ async def sample_get_publisher_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -410,6 +428,107 @@ async def sample_get_publisher_model(): # Done; return the response. return response + async def deploy( + self, + request: Optional[Union[model_garden_service.DeployRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys a model to a new endpoint. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_deploy(): + # Create a client + client = aiplatform_v1.ModelGardenServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeployRequest( + publisher_model_name="publisher_model_name_value", + destination="destination_value", + ) + + # Make the request + operation = client.deploy(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeployRequest, dict]]): + The request object. Request message for + [ModelGardenService.Deploy][google.cloud.aiplatform.v1.ModelGardenService.Deploy]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.DeployResponse` Response message for + [ModelGardenService.Deploy][google.cloud.aiplatform.v1.ModelGardenService.Deploy]. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, model_garden_service.DeployRequest): + request = model_garden_service.DeployRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[self._client._transport.deploy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("destination", request.destination),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_garden_service.DeployResponse, + metadata_type=model_garden_service.DeployOperationMetadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, @@ -1118,5 +1237,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("ModelGardenServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/client.py b/google/cloud/aiplatform_v1/services/model_garden_service/client.py index 317dfae228..65e8390682 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,6 +14,8 @@ # limitations under the License. # from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -65,6 +68,8 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore +import google.api_core.operation as operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore from .transports.base import ModelGardenServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ModelGardenServiceGrpcTransport from .transports.grpc_asyncio import ModelGardenServiceGrpcAsyncIOTransport @@ -164,6 +169,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -211,6 +244,50 @@ def transport(self) -> ModelGardenServiceTransport: """ return self._transport + @staticmethod + def endpoint_path( + project: str, + location: str, + endpoint: str, + ) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str, str]: + """Parses a endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_path( + project: str, + location: str, + model: str, + ) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def publisher_model_path( publisher: str, @@ -368,12 +445,8 @@ 
def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = ModelGardenServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -381,7 +454,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -413,20 +486,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = ModelGardenServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -519,6 +586,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -611,11 +705,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ModelGardenServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ModelGardenServiceClient._read_environment_variables() + ) self._client_cert_source = ModelGardenServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -728,21 +820,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.ModelGardenServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "credentialsType": None, + } + ), ) def get_publisher_model( @@ -811,7 +907,10 @@ def sample_get_publisher_model(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -851,6 +950,107 @@ def sample_get_publisher_model(): # Done; return the response. return response + def deploy( + self, + request: Optional[Union[model_garden_service.DeployRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Deploys a model to a new endpoint. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_deploy(): + # Create a client + client = aiplatform_v1.ModelGardenServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeployRequest( + publisher_model_name="publisher_model_name_value", + destination="destination_value", + ) + + # Make the request + operation = client.deploy(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeployRequest, dict]): + The request object. Request message for + [ModelGardenService.Deploy][google.cloud.aiplatform.v1.ModelGardenService.Deploy]. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.DeployResponse` Response message for + [ModelGardenService.Deploy][google.cloud.aiplatform.v1.ModelGardenService.Deploy]. + + """ + # Create or coerce a protobuf request object. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, model_garden_service.DeployRequest): + request = model_garden_service.DeployRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("destination", request.destination),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + model_garden_service.DeployResponse, + metadata_type=model_garden_service.DeployOperationMetadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "ModelGardenServiceClient": return self @@ -908,16 +1108,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -963,16 +1167,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1135,16 +1343,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -1256,16 +1468,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -1378,16 +1594,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -1438,16 +1658,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -1493,16 +1717,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -1548,21 +1776,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("ModelGardenServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/__init__.py index 03865604f6..3a1a431c25 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py index 3e53995cda..7650417275 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,8 +23,10 @@ from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries +from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import model_garden_service from google.cloud.aiplatform_v1.types import publisher_model @@ -37,6 +39,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class ModelGardenServiceTransport(abc.ABC): """Abstract transport class for ModelGardenService.""" @@ -68,9 +73,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -83,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -99,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -139,6 +148,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.deploy: gapic_v1.method.wrap_method( + self.deploy, + default_timeout=None, + client_info=client_info, + ), self.get_location: gapic_v1.method.wrap_method( self.get_location, default_timeout=None, @@ -200,6 +214,11 @@ def close(self): """ raise NotImplementedError() + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + @property def get_publisher_model( self, @@ -211,6 +230,15 @@ def get_publisher_model( ]: raise NotImplementedError() + @property + def deploy( + self, + ) -> Callable[ + [model_garden_service.DeployRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def list_operations( self, @@ -235,13 +263,19 @@ def get_operation( @property def cancel_operation( 
self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py index e73731d19b..4680ab0617 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers +from google.api_core import operations_v1 from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore @@ -75,12 +76,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -157,9 +157,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. 
This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -200,6 +201,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -292,9 +294,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -326,6 +329,22 @@ def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service.""" return self._grpc_channel + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. 
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) + + # Return the client from cache. + return self._operations_client + @property def get_publisher_model( self, @@ -354,6 +373,32 @@ def get_publisher_model( ) return self._stubs["get_publisher_model"] + @property + def deploy( + self, + ) -> Callable[[model_garden_service.DeployRequest], operations_pb2.Operation]: + r"""Return a callable for the deploy method over gRPC. + + Deploys a model to a new endpoint. + + Returns: + Callable[[~.DeployRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "deploy" not in self._stubs: + self._stubs["deploy"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelGardenService/Deploy", + request_serializer=model_garden_service.DeployRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["deploy"] + def close(self): self._logged_channel.close() diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py index 63dfffdc48..11eb2c610c 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ from google.api_core import grpc_helpers_async from google.api_core import exceptions as core_exceptions from google.api_core import retry_async as retries +from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.protobuf.json_format import MessageToJson @@ -153,8 +154,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -205,9 +207,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -249,6 +252,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -333,6 +337,22 @@ def grpc_channel(self) -> aio.Channel: # Return the channel from cache. return self._grpc_channel + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self._logged_channel + ) + + # Return the client from cache. 
+ return self._operations_client + @property def get_publisher_model( self, @@ -362,6 +382,34 @@ def get_publisher_model( ) return self._stubs["get_publisher_model"] + @property + def deploy( + self, + ) -> Callable[ + [model_garden_service.DeployRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the deploy method over gRPC. + + Deploys a model to a new endpoint. + + Returns: + Callable[[~.DeployRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "deploy" not in self._stubs: + self._stubs["deploy"] = self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelGardenService/Deploy", + request_serializer=model_garden_service.DeployRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["deploy"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -370,6 +418,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.deploy: self._wrap_method( + self.deploy, + default_timeout=None, + client_info=client_info, + ), self.get_location: self._wrap_method( self.get_location, default_timeout=None, diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py index 3043dba7ca..0480590a29 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # 
Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,8 +23,10 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format +from google.api_core import operations_v1 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.cloud.location import locations_pb2 # type: ignore @@ -63,6 +65,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class ModelGardenServiceRestInterceptor: """Interceptor for ModelGardenService. @@ -79,6 +84,14 @@ class ModelGardenServiceRestInterceptor: .. code-block:: python class MyCustomModelGardenServiceInterceptor(ModelGardenServiceRestInterceptor): + def pre_deploy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_deploy(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_publisher_model(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -93,6 +106,54 @@ def post_get_publisher_model(self, response): """ + def pre_deploy( + self, + request: model_garden_service.DeployRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_garden_service.DeployRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for deploy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelGardenService server. + """ + return request, metadata + + def post_deploy( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for deploy + + DEPRECATED. 
Please use the `post_deploy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the ModelGardenService server but before + it is returned to user code. This `post_deploy` interceptor runs + before the `post_deploy_with_metadata` interceptor. + """ + return response + + def post_deploy_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for deploy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelGardenService server but before it is returned to user code. + + We recommend only using this `post_deploy_with_metadata` + interceptor in new development instead of the `post_deploy` interceptor. + When both interceptors are used, this `post_deploy_with_metadata` interceptor runs after the + `post_deploy` interceptor. The (possibly modified) response returned by + `post_deploy` will be passed to + `post_deploy_with_metadata`. + """ + return response, metadata + def pre_get_publisher_model( self, request: model_garden_service.GetPublisherModelRequest, @@ -113,12 +174,35 @@ def post_get_publisher_model( ) -> publisher_model.PublisherModel: """Post-rpc interceptor for get_publisher_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_publisher_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelGardenService server but before - it is returned to user code. + it is returned to user code. This `post_get_publisher_model` interceptor runs + before the `post_get_publisher_model_with_metadata` interceptor. 
""" return response + def post_get_publisher_model_with_metadata( + self, + response: publisher_model.PublisherModel, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[publisher_model.PublisherModel, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_publisher_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelGardenService server but before it is returned to user code. + + We recommend only using this `post_get_publisher_model_with_metadata` + interceptor in new development instead of the `post_get_publisher_model` interceptor. + When both interceptors are used, this `post_get_publisher_model_with_metadata` interceptor runs after the + `post_get_publisher_model` interceptor. The (possibly modified) response returned by + `post_get_publisher_model` will be passed to + `post_get_publisher_model_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -312,142 +396,2295 @@ def post_get_operation( """ return response - def pre_list_operations( - self, - request: operations_pb2.ListOperationsRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for list_operations + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelGardenService server. 
+ """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the ModelGardenService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelGardenService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelGardenService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class ModelGardenServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ModelGardenServiceRestInterceptor + + +class ModelGardenServiceRestTransport(_BaseModelGardenServiceRestTransport): + """REST backend synchronous transport for ModelGardenService. + + The interface of Model Garden Service. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ModelGardenServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or ModelGardenServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", 
+ "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + 
"method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", 
+ }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ], + 
"google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) - Override in a subclass to manipulate the request or metadata - 
before they are sent to the ModelGardenService server. - """ - return request, metadata + # Return the client from cache. + return self._operations_client - def post_list_operations( - self, response: operations_pb2.ListOperationsResponse - ) -> operations_pb2.ListOperationsResponse: - """Post-rpc interceptor for list_operations + class _Deploy( + _BaseModelGardenServiceRestTransport._BaseDeploy, ModelGardenServiceRestStub + ): + def __hash__(self): + return hash("ModelGardenServiceRestTransport.Deploy") - Override in a subclass to manipulate the response - after it is returned by the ModelGardenService server but before - it is returned to user code. - """ - return response + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): - def pre_wait_operation( - self, - request: operations_pb2.WaitOperationRequest, - metadata: Sequence[Tuple[str, Union[str, bytes]]], - ) -> Tuple[ - operations_pb2.WaitOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] - ]: - """Pre-rpc interceptor for wait_operation + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response - Override in a subclass to manipulate the request or metadata - before they are sent to the ModelGardenService server. - """ - return request, metadata + def __call__( + self, + request: model_garden_service.DeployRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the deploy method over HTTP. 
- def post_wait_operation( - self, response: operations_pb2.Operation - ) -> operations_pb2.Operation: - """Post-rpc interceptor for wait_operation + Args: + request (~.model_garden_service.DeployRequest): + The request object. Request message for + [ModelGardenService.Deploy][google.cloud.aiplatform.v1.ModelGardenService.Deploy]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - Override in a subclass to manipulate the response - after it is returned by the ModelGardenService server but before - it is returned to user code. - """ - return response + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + """ -@dataclasses.dataclass -class ModelGardenServiceRestStub: - _session: AuthorizedSession - _host: str - _interceptor: ModelGardenServiceRestInterceptor + http_options = ( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_http_options() + ) + request, metadata = self._interceptor.pre_deploy(request, metadata) + transcoded_request = _BaseModelGardenServiceRestTransport._BaseDeploy._get_transcoded_request( + http_options, request + ) -class ModelGardenServiceRestTransport(_BaseModelGardenServiceRestTransport): - """REST backend synchronous transport for ModelGardenService. + body = ( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_request_body_json( + transcoded_request + ) + ) - The interface of Model Garden Service. 
+ # Jsonify the query params + query_params = ( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_query_params_json( + transcoded_request + ) + ) - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.ModelGardenServiceClient.Deploy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "rpcName": "Deploy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) - It sends JSON representations of protocol buffers over HTTP/1.1 - """ + # Send the request + response = ModelGardenServiceRestTransport._Deploy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) - def __init__( - self, - *, - host: str = "aiplatform.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = "https", - interceptor: Optional[ModelGardenServiceRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. 
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) - Args: - host (Optional[str]): - The hostname to connect to (default: 'aiplatform.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
- # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - url_scheme=url_scheme, - api_audience=api_audience, - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST - ) - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or ModelGardenServiceRestInterceptor() - self._prep_wrapped_messages(client_info) + resp = self._interceptor.post_deploy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_deploy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.ModelGardenServiceClient.deploy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "rpcName": "Deploy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _GetPublisherModel( _BaseModelGardenServiceRestTransport._BaseGetPublisherModel, @@ -571,6 +2808,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_publisher_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_publisher_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -594,6 +2835,14 @@ def __call__( ) return resp + 
@property + def deploy( + self, + ) -> Callable[[model_garden_service.DeployRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._Deploy(self._session, self._host, self._interceptor) # type: ignore + @property def get_publisher_model( self, @@ -646,7 +2895,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -789,7 +3037,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -932,7 +3179,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1076,7 +3322,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1224,7 +3469,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1371,7 +3615,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1488,7 +3731,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. 
Args: @@ -1605,7 +3847,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1748,7 +3989,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -1891,7 +4131,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py index 2104caf6f4..8358eb9ca3 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -30,15 +30,17 @@ from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 +from google.api_core import operations_v1 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.cloud.location import locations_pb2 # type: ignore from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format +from google.api_core import operations_v1 from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.cloud.location import locations_pb2 # type: ignore @@ -80,6 +82,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncModelGardenServiceRestInterceptor: """Asynchronous Interceptor for ModelGardenService. @@ -96,6 +101,14 @@ class AsyncModelGardenServiceRestInterceptor: .. 
code-block:: python class MyCustomModelGardenServiceInterceptor(ModelGardenServiceRestInterceptor): + async def pre_deploy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_deploy(self, response): + logging.log(f"Received response: {response}") + return response + async def pre_get_publisher_model(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -110,6 +123,54 @@ async def post_get_publisher_model(self, response): """ + async def pre_deploy( + self, + request: model_garden_service.DeployRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_garden_service.DeployRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Pre-rpc interceptor for deploy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelGardenService server. + """ + return request, metadata + + async def post_deploy( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for deploy + + DEPRECATED. Please use the `post_deploy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the ModelGardenService server but before + it is returned to user code. This `post_deploy` interceptor runs + before the `post_deploy_with_metadata` interceptor. + """ + return response + + async def post_deploy_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for deploy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelGardenService server but before it is returned to user code. 
+ + We recommend only using this `post_deploy_with_metadata` + interceptor in new development instead of the `post_deploy` interceptor. + When both interceptors are used, this `post_deploy_with_metadata` interceptor runs after the + `post_deploy` interceptor. The (possibly modified) response returned by + `post_deploy` will be passed to + `post_deploy_with_metadata`. + """ + return response, metadata + async def pre_get_publisher_model( self, request: model_garden_service.GetPublisherModelRequest, @@ -130,12 +191,35 @@ async def post_get_publisher_model( ) -> publisher_model.PublisherModel: """Post-rpc interceptor for get_publisher_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_publisher_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelGardenService server but before - it is returned to user code. + it is returned to user code. This `post_get_publisher_model` interceptor runs + before the `post_get_publisher_model_with_metadata` interceptor. """ return response + async def post_get_publisher_model_with_metadata( + self, + response: publisher_model.PublisherModel, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[publisher_model.PublisherModel, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_publisher_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelGardenService server but before it is returned to user code. + + We recommend only using this `post_get_publisher_model_with_metadata` + interceptor in new development instead of the `post_get_publisher_model` interceptor. + When both interceptors are used, this `post_get_publisher_model_with_metadata` interceptor runs after the + `post_get_publisher_model` interceptor. 
The (possibly modified) response returned by + `post_get_publisher_model` will be passed to + `post_get_publisher_model_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -444,6 +528,9 @@ def __init__( self._interceptor = interceptor or AsyncModelGardenServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -453,6 +540,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.deploy: self._wrap_method( + self.deploy, + default_timeout=None, + client_info=client_info, + ), self.get_location: self._wrap_method( self.get_location, default_timeout=None, @@ -510,6 +602,172 @@ def _wrap_method(self, func, *args, **kwargs): kwargs["kind"] = self.kind return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + class _Deploy( + _BaseModelGardenServiceRestTransport._BaseDeploy, + AsyncModelGardenServiceRestStub, + ): + def __hash__(self): + return hash("AsyncModelGardenServiceRestTransport.Deploy") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + async def __call__( + self, + request: model_garden_service.DeployRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: 
Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the deploy method over HTTP. + + Args: + request (~.model_garden_service.DeployRequest): + The request object. Request message for + [ModelGardenService.Deploy][google.cloud.aiplatform.v1.ModelGardenService.Deploy]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_http_options() + ) + + request, metadata = await self._interceptor.pre_deploy(request, metadata) + transcoded_request = _BaseModelGardenServiceRestTransport._BaseDeploy._get_transcoded_request( + http_options, request + ) + + body = ( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_request_body_json( + transcoded_request + ) + ) + + # Jsonify the query params + query_params = ( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for 
google.cloud.aiplatform_v1.ModelGardenServiceClient.Deploy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "rpcName": "Deploy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncModelGardenServiceRestTransport._Deploy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = operations_pb2.Operation() + pb_resp = resp + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_deploy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_deploy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient.deploy", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "rpcName": "Deploy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + class _GetPublisherModel( 
_BaseModelGardenServiceRestTransport._BaseGetPublisherModel, AsyncModelGardenServiceRestStub, @@ -608,58 +866,2061 @@ async def __call__( "httpRequest": http_request, "metadata": http_request["headers"], }, - ) - - # Send the request - response = await AsyncModelGardenServiceRestTransport._GetPublisherModel._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + ) + + # Send the request + response = await AsyncModelGardenServiceRestTransport._GetPublisherModel._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = publisher_model.PublisherModel() + pb_resp = publisher_model.PublisherModel.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_get_publisher_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_publisher_model_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = publisher_model.PublisherModel.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for 
google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient.get_publisher_model", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", + "rpcName": "GetPublisherModel", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + + @property + def operations_client(self) -> AsyncOperationsRestClient: + """Create the async client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. + if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + 
"uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + 
"method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/agents/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/apps/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + 
"uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + 
{ + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/agents/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/apps/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookExecutionJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.AsyncOperationsRestTransport( # type: ignore + host=self._host, + # use the credentials which are saved + credentials=self._credentials, # type: ignore + http_options=http_options, + path_prefix="v1", ) - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - content = await response.read() - payload = json.loads(content.decode("utf-8")) - request_url = "{host}{uri}".format( - host=self._host, uri=transcoded_request["uri"] - ) - method = transcoded_request["method"] - raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + self._operations_client = AsyncOperationsRestClient( + transport=rest_transport + ) - # Return the response - resp = publisher_model.PublisherModel() - pb_resp = publisher_model.PublisherModel.pb(resp) - content = await response.read() - json_format.Parse(content, pb_resp, ignore_unknown_fields=True) - resp = await self._interceptor.post_get_publisher_model(resp) - if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( - logging.DEBUG - ): # pragma: NO COVER - try: - response_payload = publisher_model.PublisherModel.to_json(response) - except: - response_payload = None - http_response = { - "payload": response_payload, - "headers": dict(response.headers), - "status": "OK", # need to obtain this properly - } - _LOGGER.debug( - "Received response for google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient.get_publisher_model", - extra={ - "serviceName": "google.cloud.aiplatform.v1.ModelGardenService", - "rpcName": "GetPublisherModel", - "metadata": http_response["headers"], - 
"httpResponse": http_response, - }, - ) + # Return the client from cache. + return self._operations_client - return resp + @property + def deploy( + self, + ) -> Callable[[model_garden_service.DeployRequest], operations_pb2.Operation]: + return self._Deploy(self._session, self._host, self._interceptor) # type: ignore @property def get_publisher_model( @@ -711,7 +2972,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -864,7 +3124,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -1017,7 +3276,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -1171,7 +3429,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -1329,7 +3586,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -1480,7 +3736,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -1603,7 +3858,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. 
Args: @@ -1726,7 +3980,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -1879,7 +4132,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -2030,7 +4282,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py index de76f5f6f6..fe184ebd17 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -94,6 +94,63 @@ def __init__( api_audience=api_audience, ) + class _BaseDeploy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{destination=projects/*/locations/*}:deploy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = model_garden_service.DeployRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseModelGardenServiceRestTransport._BaseDeploy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseGetPublisherModel: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -262,6 +319,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -348,6 +409,11 @@ def _get_http_options(): "uri": 
"/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -426,6 +492,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -607,6 +677,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -743,6 +817,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -751,6 +829,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -791,6 +877,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -972,6 +1062,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1000,6 +1094,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1112,6 +1210,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1120,6 +1222,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1180,6 +1290,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1361,6 +1475,10 @@ def _get_http_options(): "method": 
"get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1401,6 +1519,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1513,6 +1635,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1521,6 +1647,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1581,6 +1715,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1758,6 +1896,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1798,6 +1940,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -1910,6 +2056,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -1930,6 +2084,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -1978,6 +2136,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2163,6 +2325,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2195,6 +2361,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2303,6 +2473,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2311,6 +2485,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2371,6 +2553,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/model_service/__init__.py b/google/cloud/aiplatform_v1/services/model_service/__init__.py index 7508cedb57..aa17eabb4a 100644 --- a/google/cloud/aiplatform_v1/services/model_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/model_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index 44f08473ef..51d7a27d33 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.model_service import pagers from google.cloud.aiplatform_v1.types import deployed_model_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -62,10 +61,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore 
+import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport from .client import ModelServiceClient @@ -146,7 +147,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: ModelServiceAsyncClient: The constructed client. """ - return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + ModelServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(ModelServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -162,7 +166,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: ModelServiceAsyncClient: The constructed client. """ - return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + ModelServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(ModelServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -303,21 +310,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.ModelServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.ModelService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.ModelService", - "credentialsType": None, - }, + extra=( + { + 
"serviceName": "google.cloud.aiplatform.v1.ModelService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "credentialsType": None, + } + ), ) async def upload_model( @@ -403,7 +412,10 @@ async def sample_upload_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) + flattened_params = [parent, model] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -530,7 +542,10 @@ async def sample_get_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -643,7 +658,10 @@ async def sample_list_models(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -766,7 +784,10 @@ async def sample_list_model_versions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -820,6 +841,141 @@ async def sample_list_model_versions(): # Done; return the response. return response + async def list_model_version_checkpoints( + self, + request: Optional[ + Union[model_service.ListModelVersionCheckpointsRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListModelVersionCheckpointsAsyncPager: + r"""Lists checkpoints of the specified model version. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_model_version_checkpoints(): + # Create a client + client = aiplatform_v1.ModelServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelVersionCheckpointsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_version_checkpoints(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsRequest, dict]]): + The request object. Request message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints]. + name (:class:`str`): + Required. The name of the model version to list + checkpoints for. + ``projects/{project}/locations/{location}/models/{model}@{version}`` + Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the latest + version will be used. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionCheckpointsAsyncPager: + Response message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, model_service.ListModelVersionCheckpointsRequest): + request = model_service.ListModelVersionCheckpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_model_version_checkpoints + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListModelVersionCheckpointsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def update_model( self, request: Optional[Union[model_service.UpdateModelRequest, dict]] = None, @@ -916,7 +1072,10 @@ async def sample_update_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) + flattened_params = [model, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1039,7 +1198,10 @@ async def sample_update_explanation_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model]) + flattened_params = [model] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1177,7 +1339,10 @@ async def sample_delete_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1317,7 +1482,10 @@ async def sample_delete_model_version(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1456,7 +1624,10 @@ async def sample_merge_version_aliases(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, version_aliases]) + flattened_params = [name, version_aliases] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1588,7 +1759,10 @@ async def sample_export_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) + flattened_params = [name, output_config] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1730,7 +1904,10 @@ async def sample_copy_model(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, source_model]) + flattened_params = [parent, source_model] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1860,7 +2037,10 @@ async def sample_import_model_evaluation(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_evaluation]) + flattened_params = [parent, model_evaluation] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1984,7 +2164,10 @@ async def sample_batch_import_model_evaluation_slices(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_evaluation_slices]) + flattened_params = [parent, model_evaluation_slices] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2110,7 +2293,10 @@ async def sample_batch_import_evaluated_annotations(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, evaluated_annotations]) + flattened_params = [parent, evaluated_annotations] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2225,7 +2411,10 @@ async def sample_get_model_evaluation(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2340,7 +2529,10 @@ async def sample_list_model_evaluations(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2464,7 +2656,10 @@ async def sample_get_model_evaluation_slice(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2579,7 +2774,10 @@ async def sample_list_model_evaluation_slices(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3341,5 +3539,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("ModelServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index e77f13eb48..92a1184722 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.model_service import pagers from google.cloud.aiplatform_v1.types import deployed_model_ref from google.cloud.aiplatform_v1.types import encryption_spec @@ -76,10 +77,12 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ModelServiceGrpcTransport from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport @@ -177,6 +180,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE 
= "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -480,12 +511,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = ModelServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -493,7 +520,7 @@ def 
get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -525,20 +552,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = ModelServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -631,6 +652,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -719,11 +767,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = ModelServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + ModelServiceClient._read_environment_variables() + ) self._client_cert_source = ModelServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -832,21 +878,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.ModelServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.ModelService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.ModelService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": 
f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "credentialsType": None, + } + ), ) def upload_model( @@ -932,7 +982,10 @@ def sample_upload_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) + flattened_params = [parent, model] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1056,7 +1109,10 @@ def sample_get_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1166,7 +1222,10 @@ def sample_list_models(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1286,7 +1345,10 @@ def sample_list_model_versions(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1337,6 +1399,140 @@ def sample_list_model_versions(): # Done; return the response. return response + def list_model_version_checkpoints( + self, + request: Optional[ + Union[model_service.ListModelVersionCheckpointsRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListModelVersionCheckpointsPager: + r"""Lists checkpoints of the specified model version. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_model_version_checkpoints(): + # Create a client + client = aiplatform_v1.ModelServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListModelVersionCheckpointsRequest( + name="name_value", + ) + + # Make the request + page_result = client.list_model_version_checkpoints(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsRequest, dict]): + The request object. Request message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints]. + name (str): + Required. The name of the model version to list + checkpoints for. + ``projects/{project}/locations/{location}/models/{model}@{version}`` + Example: + ``projects/{project}/locations/{location}/models/{model}@2`` + or + ``projects/{project}/locations/{location}/models/{model}@golden`` + If no version ID or alias is specified, the latest + version will be used. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionCheckpointsPager: + Response message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, model_service.ListModelVersionCheckpointsRequest): + request = model_service.ListModelVersionCheckpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_model_version_checkpoints + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListModelVersionCheckpointsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def update_model( self, request: Optional[Union[model_service.UpdateModelRequest, dict]] = None, @@ -1433,7 +1629,10 @@ def sample_update_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) + flattened_params = [model, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1553,7 +1752,10 @@ def sample_update_explanation_dataset(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model]) + flattened_params = [model] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1690,7 +1892,10 @@ def sample_delete_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1827,7 +2032,10 @@ def sample_delete_model_version(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1963,7 +2171,10 @@ def sample_merge_version_aliases(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, version_aliases]) + flattened_params = [name, version_aliases] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2092,7 +2303,10 @@ def sample_export_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) + flattened_params = [name, output_config] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2231,7 +2445,10 @@ def sample_copy_model(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, source_model]) + flattened_params = [parent, source_model] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2358,7 +2575,10 @@ def sample_import_model_evaluation(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_evaluation]) + flattened_params = [parent, model_evaluation] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2479,7 +2699,10 @@ def sample_batch_import_model_evaluation_slices(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_evaluation_slices]) + flattened_params = [parent, model_evaluation_slices] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2604,7 +2827,10 @@ def sample_batch_import_evaluated_annotations(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, evaluated_annotations]) + flattened_params = [parent, evaluated_annotations] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2718,7 +2944,10 @@ def sample_get_model_evaluation(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2830,7 +3059,10 @@ def sample_list_model_evaluations(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2951,7 +3183,10 @@ def sample_get_model_evaluation_slice(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3065,7 +3300,10 @@ def sample_list_model_evaluation_slices(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3175,16 +3413,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -3230,16 +3472,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -3402,16 +3648,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -3523,16 +3773,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -3645,16 +3899,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -3705,16 +3963,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -3760,16 +4022,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -3815,21 +4081,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("ModelServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/model_service/pagers.py b/google/cloud/aiplatform_v1/services/model_service/pagers.py index 03d454f889..9041f8288b 100644 --- a/google/cloud/aiplatform_v1/services/model_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/model_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -355,6 +355,166 @@ def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) +class ListModelVersionCheckpointsPager: + """A pager for iterating through ``list_model_version_checkpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``checkpoints`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelVersionCheckpoints`` requests and continue to iterate + through the ``checkpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_service.ListModelVersionCheckpointsResponse], + request: model_service.ListModelVersionCheckpointsRequest, + response: model_service.ListModelVersionCheckpointsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = model_service.ListModelVersionCheckpointsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_service.ListModelVersionCheckpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[model_service.ModelVersionCheckpoint]: + for page in self.pages: + yield from page.checkpoints + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelVersionCheckpointsAsyncPager: + """A pager for iterating through ``list_model_version_checkpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``checkpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelVersionCheckpoints`` requests and continue to iterate + through the ``checkpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[model_service.ListModelVersionCheckpointsResponse] + ], + request: model_service.ListModelVersionCheckpointsRequest, + response: model_service.ListModelVersionCheckpointsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelVersionCheckpointsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + self._method = method + self._request = model_service.ListModelVersionCheckpointsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[model_service.ListModelVersionCheckpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[model_service.ModelVersionCheckpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.checkpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + class ListModelEvaluationsPager: """A pager for iterating through ``list_model_evaluations`` requests. diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py index 6fb5de709f..5a7ceaa106 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/google/cloud/aiplatform_v1/services/model_service/transports/base.py index fac38705d6..df84dcbee4 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model as gca_model @@ -42,6 +43,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class ModelServiceTransport(abc.ABC): """Abstract transport class for ModelService.""" @@ -73,9 +77,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -88,8 +93,6 @@ def __init__( be used for service account credentials. 
""" - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -159,6 +167,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.list_model_version_checkpoints: gapic_v1.method.wrap_method( + self.list_model_version_checkpoints, + default_timeout=None, + client_info=client_info, + ), self.update_model: gapic_v1.method.wrap_method( self.update_model, default_timeout=None, @@ -336,6 +349,18 @@ def list_model_versions( ]: raise NotImplementedError() + @property + def list_model_version_checkpoints( + self, + ) -> Callable[ + [model_service.ListModelVersionCheckpointsRequest], + Union[ + model_service.ListModelVersionCheckpointsResponse, + Awaitable[model_service.ListModelVersionCheckpointsResponse], + ], + ]: + raise NotImplementedError() + @property def update_model( self, @@ -507,13 +532,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + 
[operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index 9a31d4c9c9..41281c06c9 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -80,12 +80,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.ModelService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -162,9 +161,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. 
channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -298,9 +298,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -453,6 +454,37 @@ def list_model_versions( ) return self._stubs["list_model_versions"] + @property + def list_model_version_checkpoints( + self, + ) -> Callable[ + [model_service.ListModelVersionCheckpointsRequest], + model_service.ListModelVersionCheckpointsResponse, + ]: + r"""Return a callable for the list model version checkpoints method over gRPC. + + Lists checkpoints of the specified model version. + + Returns: + Callable[[~.ListModelVersionCheckpointsRequest], + ~.ListModelVersionCheckpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_model_version_checkpoints" not in self._stubs: + self._stubs["list_model_version_checkpoints"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/ListModelVersionCheckpoints", + request_serializer=model_service.ListModelVersionCheckpointsRequest.serialize, + response_deserializer=model_service.ListModelVersionCheckpointsResponse.deserialize, + ) + ) + return self._stubs["list_model_version_checkpoints"] + @property def update_model( self, @@ -501,12 +533,12 @@ def update_explanation_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_explanation_dataset" not in self._stubs: - self._stubs[ - "update_explanation_dataset" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/UpdateExplanationDataset", - request_serializer=model_service.UpdateExplanationDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_explanation_dataset"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/UpdateExplanationDataset", + request_serializer=model_service.UpdateExplanationDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_explanation_dataset"] @@ -715,12 +747,12 @@ def batch_import_model_evaluation_slices( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_import_model_evaluation_slices" not in self._stubs: - self._stubs[ - "batch_import_model_evaluation_slices" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/BatchImportModelEvaluationSlices", - request_serializer=model_service.BatchImportModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.BatchImportModelEvaluationSlicesResponse.deserialize, + self._stubs["batch_import_model_evaluation_slices"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/BatchImportModelEvaluationSlices", + request_serializer=model_service.BatchImportModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.BatchImportModelEvaluationSlicesResponse.deserialize, + ) ) return self._stubs["batch_import_model_evaluation_slices"] @@ -748,12 +780,12 @@ def batch_import_evaluated_annotations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_import_evaluated_annotations" not in self._stubs: - self._stubs[ - "batch_import_evaluated_annotations" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/BatchImportEvaluatedAnnotations", - request_serializer=model_service.BatchImportEvaluatedAnnotationsRequest.serialize, - response_deserializer=model_service.BatchImportEvaluatedAnnotationsResponse.deserialize, + self._stubs["batch_import_evaluated_annotations"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/BatchImportEvaluatedAnnotations", + request_serializer=model_service.BatchImportEvaluatedAnnotationsRequest.serialize, + response_deserializer=model_service.BatchImportEvaluatedAnnotationsResponse.deserialize, + ) ) return self._stubs["batch_import_evaluated_annotations"] @@ -836,12 +868,12 @@ def get_model_evaluation_slice( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_model_evaluation_slice" not in self._stubs: - self._stubs[ - "get_model_evaluation_slice" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice", - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + self._stubs["get_model_evaluation_slice"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice", + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) ) return self._stubs["get_model_evaluation_slice"] @@ -867,12 +899,12 @@ def list_model_evaluation_slices( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_model_evaluation_slices" not in self._stubs: - self._stubs[ - "list_model_evaluation_slices" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices", - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + self._stubs["list_model_evaluation_slices"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices", + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) ) return self._stubs["list_model_evaluation_slices"] diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index 51cbbcc959..07d1a01ff5 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ 
b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -158,8 +158,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -210,9 +211,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. 
@@ -466,6 +468,37 @@ def list_model_versions( ) return self._stubs["list_model_versions"] + @property + def list_model_version_checkpoints( + self, + ) -> Callable[ + [model_service.ListModelVersionCheckpointsRequest], + Awaitable[model_service.ListModelVersionCheckpointsResponse], + ]: + r"""Return a callable for the list model version checkpoints method over gRPC. + + Lists checkpoints of the specified model version. + + Returns: + Callable[[~.ListModelVersionCheckpointsRequest], + Awaitable[~.ListModelVersionCheckpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_version_checkpoints" not in self._stubs: + self._stubs["list_model_version_checkpoints"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/ListModelVersionCheckpoints", + request_serializer=model_service.ListModelVersionCheckpointsRequest.serialize, + response_deserializer=model_service.ListModelVersionCheckpointsResponse.deserialize, + ) + ) + return self._stubs["list_model_version_checkpoints"] + @property def update_model( self, @@ -515,12 +548,12 @@ def update_explanation_dataset( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_explanation_dataset" not in self._stubs: - self._stubs[ - "update_explanation_dataset" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/UpdateExplanationDataset", - request_serializer=model_service.UpdateExplanationDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_explanation_dataset"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/UpdateExplanationDataset", + request_serializer=model_service.UpdateExplanationDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_explanation_dataset"] @@ -737,12 +770,12 @@ def batch_import_model_evaluation_slices( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_import_model_evaluation_slices" not in self._stubs: - self._stubs[ - "batch_import_model_evaluation_slices" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/BatchImportModelEvaluationSlices", - request_serializer=model_service.BatchImportModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.BatchImportModelEvaluationSlicesResponse.deserialize, + self._stubs["batch_import_model_evaluation_slices"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/BatchImportModelEvaluationSlices", + request_serializer=model_service.BatchImportModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.BatchImportModelEvaluationSlicesResponse.deserialize, + ) ) return self._stubs["batch_import_model_evaluation_slices"] @@ -770,12 +803,12 @@ def batch_import_evaluated_annotations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_import_evaluated_annotations" not in self._stubs: - self._stubs[ - "batch_import_evaluated_annotations" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/BatchImportEvaluatedAnnotations", - request_serializer=model_service.BatchImportEvaluatedAnnotationsRequest.serialize, - response_deserializer=model_service.BatchImportEvaluatedAnnotationsResponse.deserialize, + self._stubs["batch_import_evaluated_annotations"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/BatchImportEvaluatedAnnotations", + request_serializer=model_service.BatchImportEvaluatedAnnotationsRequest.serialize, + response_deserializer=model_service.BatchImportEvaluatedAnnotationsResponse.deserialize, + ) ) return self._stubs["batch_import_evaluated_annotations"] @@ -859,12 +892,12 @@ def get_model_evaluation_slice( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_model_evaluation_slice" not in self._stubs: - self._stubs[ - "get_model_evaluation_slice" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice", - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + self._stubs["get_model_evaluation_slice"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice", + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) ) return self._stubs["get_model_evaluation_slice"] @@ -890,12 +923,12 @@ def list_model_evaluation_slices( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_model_evaluation_slices" not in self._stubs: - self._stubs[ - "list_model_evaluation_slices" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices", - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + self._stubs["list_model_evaluation_slices"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices", + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) ) return self._stubs["list_model_evaluation_slices"] @@ -922,6 +955,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.list_model_version_checkpoints: self._wrap_method( + self.list_model_version_checkpoints, + default_timeout=None, + client_info=client_info, + ), self.update_model: self._wrap_method( self.update_model, default_timeout=None, diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py index 9a6ee9e8e1..4db4a0bcf2 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -68,6 +69,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class ModelServiceRestInterceptor: """Interceptor for ModelService. @@ -188,6 +192,14 @@ def post_list_models(self, response): logging.log(f"Received response: {response}") return response + def pre_list_model_version_checkpoints(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_model_version_checkpoints(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_model_versions(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -254,12 +266,38 @@ def post_batch_import_evaluated_annotations( ) -> model_service.BatchImportEvaluatedAnnotationsResponse: """Post-rpc interceptor for batch_import_evaluated_annotations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_import_evaluated_annotations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_batch_import_evaluated_annotations` interceptor runs + before the `post_batch_import_evaluated_annotations_with_metadata` interceptor. 
""" return response + def post_batch_import_evaluated_annotations_with_metadata( + self, + response: model_service.BatchImportEvaluatedAnnotationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.BatchImportEvaluatedAnnotationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for batch_import_evaluated_annotations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_batch_import_evaluated_annotations_with_metadata` + interceptor in new development instead of the `post_batch_import_evaluated_annotations` interceptor. + When both interceptors are used, this `post_batch_import_evaluated_annotations_with_metadata` interceptor runs after the + `post_batch_import_evaluated_annotations` interceptor. The (possibly modified) response returned by + `post_batch_import_evaluated_annotations` will be passed to + `post_batch_import_evaluated_annotations_with_metadata`. + """ + return response, metadata + def pre_batch_import_model_evaluation_slices( self, request: model_service.BatchImportModelEvaluationSlicesRequest, @@ -280,12 +318,38 @@ def post_batch_import_model_evaluation_slices( ) -> model_service.BatchImportModelEvaluationSlicesResponse: """Post-rpc interceptor for batch_import_model_evaluation_slices - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_import_model_evaluation_slices_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_batch_import_model_evaluation_slices` interceptor runs + before the `post_batch_import_model_evaluation_slices_with_metadata` interceptor. 
""" return response + def post_batch_import_model_evaluation_slices_with_metadata( + self, + response: model_service.BatchImportModelEvaluationSlicesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.BatchImportModelEvaluationSlicesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for batch_import_model_evaluation_slices + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_batch_import_model_evaluation_slices_with_metadata` + interceptor in new development instead of the `post_batch_import_model_evaluation_slices` interceptor. + When both interceptors are used, this `post_batch_import_model_evaluation_slices_with_metadata` interceptor runs after the + `post_batch_import_model_evaluation_slices` interceptor. The (possibly modified) response returned by + `post_batch_import_model_evaluation_slices` will be passed to + `post_batch_import_model_evaluation_slices_with_metadata`. + """ + return response, metadata + def pre_copy_model( self, request: model_service.CopyModelRequest, @@ -303,12 +367,35 @@ def post_copy_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for copy_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_copy_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_copy_model` interceptor runs + before the `post_copy_model_with_metadata` interceptor. 
""" return response + def post_copy_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for copy_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_copy_model_with_metadata` + interceptor in new development instead of the `post_copy_model` interceptor. + When both interceptors are used, this `post_copy_model_with_metadata` interceptor runs after the + `post_copy_model` interceptor. The (possibly modified) response returned by + `post_copy_model` will be passed to + `post_copy_model_with_metadata`. + """ + return response, metadata + def pre_delete_model( self, request: model_service.DeleteModelRequest, @@ -328,12 +415,35 @@ def post_delete_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_delete_model` interceptor runs + before the `post_delete_model_with_metadata` interceptor. """ return response + def post_delete_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. 
+ + We recommend only using this `post_delete_model_with_metadata` + interceptor in new development instead of the `post_delete_model` interceptor. + When both interceptors are used, this `post_delete_model_with_metadata` interceptor runs after the + `post_delete_model` interceptor. The (possibly modified) response returned by + `post_delete_model` will be passed to + `post_delete_model_with_metadata`. + """ + return response, metadata + def pre_delete_model_version( self, request: model_service.DeleteModelVersionRequest, @@ -353,12 +463,35 @@ def post_delete_model_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_model_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_model_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_delete_model_version` interceptor runs + before the `post_delete_model_version_with_metadata` interceptor. """ return response + def post_delete_model_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_model_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_delete_model_version_with_metadata` + interceptor in new development instead of the `post_delete_model_version` interceptor. + When both interceptors are used, this `post_delete_model_version_with_metadata` interceptor runs after the + `post_delete_model_version` interceptor. 
The (possibly modified) response returned by + `post_delete_model_version` will be passed to + `post_delete_model_version_with_metadata`. + """ + return response, metadata + def pre_export_model( self, request: model_service.ExportModelRequest, @@ -378,12 +511,35 @@ def post_export_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for export_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_export_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_export_model` interceptor runs + before the `post_export_model_with_metadata` interceptor. """ return response + def post_export_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for export_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_export_model_with_metadata` + interceptor in new development instead of the `post_export_model` interceptor. + When both interceptors are used, this `post_export_model_with_metadata` interceptor runs after the + `post_export_model` interceptor. The (possibly modified) response returned by + `post_export_model` will be passed to + `post_export_model_with_metadata`. + """ + return response, metadata + def pre_get_model( self, request: model_service.GetModelRequest, @@ -399,12 +555,33 @@ def pre_get_model( def post_get_model(self, response: model.Model) -> model.Model: """Post-rpc interceptor for get_model - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_get_model` interceptor runs + before the `post_get_model_with_metadata` interceptor. """ return response + def post_get_model_with_metadata( + self, response: model.Model, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[model.Model, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_get_model_with_metadata` + interceptor in new development instead of the `post_get_model` interceptor. + When both interceptors are used, this `post_get_model_with_metadata` interceptor runs after the + `post_get_model` interceptor. The (possibly modified) response returned by + `post_get_model` will be passed to + `post_get_model_with_metadata`. + """ + return response, metadata + def pre_get_model_evaluation( self, request: model_service.GetModelEvaluationRequest, @@ -424,12 +601,37 @@ def post_get_model_evaluation( ) -> model_evaluation.ModelEvaluation: """Post-rpc interceptor for get_model_evaluation - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_model_evaluation_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_get_model_evaluation` interceptor runs + before the `post_get_model_evaluation_with_metadata` interceptor. 
""" return response + def post_get_model_evaluation_with_metadata( + self, + response: model_evaluation.ModelEvaluation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_evaluation.ModelEvaluation, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_model_evaluation + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_get_model_evaluation_with_metadata` + interceptor in new development instead of the `post_get_model_evaluation` interceptor. + When both interceptors are used, this `post_get_model_evaluation_with_metadata` interceptor runs after the + `post_get_model_evaluation` interceptor. The (possibly modified) response returned by + `post_get_model_evaluation` will be passed to + `post_get_model_evaluation_with_metadata`. + """ + return response, metadata + def pre_get_model_evaluation_slice( self, request: model_service.GetModelEvaluationSliceRequest, @@ -450,12 +652,38 @@ def post_get_model_evaluation_slice( ) -> model_evaluation_slice.ModelEvaluationSlice: """Post-rpc interceptor for get_model_evaluation_slice - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_model_evaluation_slice_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_get_model_evaluation_slice` interceptor runs + before the `post_get_model_evaluation_slice_with_metadata` interceptor. 
""" return response + def post_get_model_evaluation_slice_with_metadata( + self, + response: model_evaluation_slice.ModelEvaluationSlice, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_evaluation_slice.ModelEvaluationSlice, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_model_evaluation_slice + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_get_model_evaluation_slice_with_metadata` + interceptor in new development instead of the `post_get_model_evaluation_slice` interceptor. + When both interceptors are used, this `post_get_model_evaluation_slice_with_metadata` interceptor runs after the + `post_get_model_evaluation_slice` interceptor. The (possibly modified) response returned by + `post_get_model_evaluation_slice` will be passed to + `post_get_model_evaluation_slice_with_metadata`. + """ + return response, metadata + def pre_import_model_evaluation( self, request: model_service.ImportModelEvaluationRequest, @@ -476,12 +704,37 @@ def post_import_model_evaluation( ) -> gca_model_evaluation.ModelEvaluation: """Post-rpc interceptor for import_model_evaluation - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_import_model_evaluation_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_import_model_evaluation` interceptor runs + before the `post_import_model_evaluation_with_metadata` interceptor. 
""" return response + def post_import_model_evaluation_with_metadata( + self, + response: gca_model_evaluation.ModelEvaluation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_model_evaluation.ModelEvaluation, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for import_model_evaluation + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_import_model_evaluation_with_metadata` + interceptor in new development instead of the `post_import_model_evaluation` interceptor. + When both interceptors are used, this `post_import_model_evaluation_with_metadata` interceptor runs after the + `post_import_model_evaluation` interceptor. The (possibly modified) response returned by + `post_import_model_evaluation` will be passed to + `post_import_model_evaluation_with_metadata`. + """ + return response, metadata + def pre_list_model_evaluations( self, request: model_service.ListModelEvaluationsRequest, @@ -502,12 +755,38 @@ def post_list_model_evaluations( ) -> model_service.ListModelEvaluationsResponse: """Post-rpc interceptor for list_model_evaluations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_evaluations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_evaluations` interceptor runs + before the `post_list_model_evaluations_with_metadata` interceptor. 
""" return response + def post_list_model_evaluations_with_metadata( + self, + response: model_service.ListModelEvaluationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelEvaluationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_evaluations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_evaluations_with_metadata` + interceptor in new development instead of the `post_list_model_evaluations` interceptor. + When both interceptors are used, this `post_list_model_evaluations_with_metadata` interceptor runs after the + `post_list_model_evaluations` interceptor. The (possibly modified) response returned by + `post_list_model_evaluations` will be passed to + `post_list_model_evaluations_with_metadata`. + """ + return response, metadata + def pre_list_model_evaluation_slices( self, request: model_service.ListModelEvaluationSlicesRequest, @@ -528,12 +807,38 @@ def post_list_model_evaluation_slices( ) -> model_service.ListModelEvaluationSlicesResponse: """Post-rpc interceptor for list_model_evaluation_slices - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_evaluation_slices_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_evaluation_slices` interceptor runs + before the `post_list_model_evaluation_slices_with_metadata` interceptor. 
""" return response + def post_list_model_evaluation_slices_with_metadata( + self, + response: model_service.ListModelEvaluationSlicesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelEvaluationSlicesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_evaluation_slices + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_evaluation_slices_with_metadata` + interceptor in new development instead of the `post_list_model_evaluation_slices` interceptor. + When both interceptors are used, this `post_list_model_evaluation_slices_with_metadata` interceptor runs after the + `post_list_model_evaluation_slices` interceptor. The (possibly modified) response returned by + `post_list_model_evaluation_slices` will be passed to + `post_list_model_evaluation_slices_with_metadata`. + """ + return response, metadata + def pre_list_models( self, request: model_service.ListModelsRequest, @@ -553,12 +858,89 @@ def post_list_models( ) -> model_service.ListModelsResponse: """Post-rpc interceptor for list_models - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_models_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_models` interceptor runs + before the `post_list_models_with_metadata` interceptor. 
+ """ + return response + + def post_list_models_with_metadata( + self, + response: model_service.ListModelsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_models + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_models_with_metadata` + interceptor in new development instead of the `post_list_models` interceptor. + When both interceptors are used, this `post_list_models_with_metadata` interceptor runs after the + `post_list_models` interceptor. The (possibly modified) response returned by + `post_list_models` will be passed to + `post_list_models_with_metadata`. + """ + return response, metadata + + def pre_list_model_version_checkpoints( + self, + request: model_service.ListModelVersionCheckpointsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelVersionCheckpointsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_model_version_checkpoints + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + def post_list_model_version_checkpoints( + self, response: model_service.ListModelVersionCheckpointsResponse + ) -> model_service.ListModelVersionCheckpointsResponse: + """Post-rpc interceptor for list_model_version_checkpoints + + DEPRECATED. Please use the `post_list_model_version_checkpoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. 
This `post_list_model_version_checkpoints` interceptor runs + before the `post_list_model_version_checkpoints_with_metadata` interceptor. """ return response + def post_list_model_version_checkpoints_with_metadata( + self, + response: model_service.ListModelVersionCheckpointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelVersionCheckpointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_version_checkpoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_version_checkpoints_with_metadata` + interceptor in new development instead of the `post_list_model_version_checkpoints` interceptor. + When both interceptors are used, this `post_list_model_version_checkpoints_with_metadata` interceptor runs after the + `post_list_model_version_checkpoints` interceptor. The (possibly modified) response returned by + `post_list_model_version_checkpoints` will be passed to + `post_list_model_version_checkpoints_with_metadata`. + """ + return response, metadata + def pre_list_model_versions( self, request: model_service.ListModelVersionsRequest, @@ -578,12 +960,37 @@ def post_list_model_versions( ) -> model_service.ListModelVersionsResponse: """Post-rpc interceptor for list_model_versions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_versions` interceptor runs + before the `post_list_model_versions_with_metadata` interceptor. 
""" return response + def post_list_model_versions_with_metadata( + self, + response: model_service.ListModelVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelVersionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_model_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_versions_with_metadata` + interceptor in new development instead of the `post_list_model_versions` interceptor. + When both interceptors are used, this `post_list_model_versions_with_metadata` interceptor runs after the + `post_list_model_versions` interceptor. The (possibly modified) response returned by + `post_list_model_versions` will be passed to + `post_list_model_versions_with_metadata`. + """ + return response, metadata + def pre_merge_version_aliases( self, request: model_service.MergeVersionAliasesRequest, @@ -602,12 +1009,33 @@ def pre_merge_version_aliases( def post_merge_version_aliases(self, response: model.Model) -> model.Model: """Post-rpc interceptor for merge_version_aliases - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_merge_version_aliases_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_merge_version_aliases` interceptor runs + before the `post_merge_version_aliases_with_metadata` interceptor. 
""" return response + def post_merge_version_aliases_with_metadata( + self, response: model.Model, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[model.Model, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for merge_version_aliases + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_merge_version_aliases_with_metadata` + interceptor in new development instead of the `post_merge_version_aliases` interceptor. + When both interceptors are used, this `post_merge_version_aliases_with_metadata` interceptor runs after the + `post_merge_version_aliases` interceptor. The (possibly modified) response returned by + `post_merge_version_aliases` will be passed to + `post_merge_version_aliases_with_metadata`. + """ + return response, metadata + def pre_update_explanation_dataset( self, request: model_service.UpdateExplanationDatasetRequest, @@ -628,12 +1056,35 @@ def post_update_explanation_dataset( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_explanation_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_explanation_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_update_explanation_dataset` interceptor runs + before the `post_update_explanation_dataset_with_metadata` interceptor. 
""" return response + def post_update_explanation_dataset_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_explanation_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_update_explanation_dataset_with_metadata` + interceptor in new development instead of the `post_update_explanation_dataset` interceptor. + When both interceptors are used, this `post_update_explanation_dataset_with_metadata` interceptor runs after the + `post_update_explanation_dataset` interceptor. The (possibly modified) response returned by + `post_update_explanation_dataset` will be passed to + `post_update_explanation_dataset_with_metadata`. + """ + return response, metadata + def pre_update_model( self, request: model_service.UpdateModelRequest, @@ -651,12 +1102,35 @@ def pre_update_model( def post_update_model(self, response: gca_model.Model) -> gca_model.Model: """Post-rpc interceptor for update_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_update_model` interceptor runs + before the `post_update_model_with_metadata` interceptor. 
""" return response + def post_update_model_with_metadata( + self, + response: gca_model.Model, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_model.Model, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_update_model_with_metadata` + interceptor in new development instead of the `post_update_model` interceptor. + When both interceptors are used, this `post_update_model_with_metadata` interceptor runs after the + `post_update_model` interceptor. The (possibly modified) response returned by + `post_update_model` will be passed to + `post_update_model_with_metadata`. + """ + return response, metadata + def pre_upload_model( self, request: model_service.UploadModelRequest, @@ -676,12 +1150,35 @@ def post_upload_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for upload_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_upload_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_upload_model` interceptor runs + before the `post_upload_model_with_metadata` interceptor. """ return response + def post_upload_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for upload_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. 
+ + We recommend only using this `post_upload_model_with_metadata` + interceptor in new development instead of the `post_upload_model` interceptor. + When both interceptors are used, this `post_upload_model_with_metadata` interceptor runs after the + `post_upload_model` interceptor. The (possibly modified) response returned by + `post_upload_model` will be passed to + `post_upload_model_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -971,9 +1468,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1184,6 +1682,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1320,6 +1822,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1328,6 +1834,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1368,6 +1882,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1530,6 +2048,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1558,6 +2080,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1670,6 +2196,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1678,6 +2208,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1738,6 +2276,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1900,6 +2442,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1940,6 +2486,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2052,6 +2602,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2060,6 +2614,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2120,6 +2682,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2278,6 +2844,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2318,6 +2888,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2430,6 +3004,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2450,6 +3032,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2498,6 +3084,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2664,6 +3254,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2696,6 +3290,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2804,6 +3402,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2812,6 +3414,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2872,6 +3482,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2956,11 +3570,10 @@ def __call__( _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_import_evaluated_annotations( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_import_evaluated_annotations( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_transcoded_request( http_options, request @@ -3025,6 +3638,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_import_evaluated_annotations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_batch_import_evaluated_annotations_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3117,11 +3736,10 @@ def __call__( _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_http_options() ) - ( - request, - metadata, - ) = self._interceptor.pre_batch_import_model_evaluation_slices( - request, metadata + request, metadata = ( + self._interceptor.pre_batch_import_model_evaluation_slices( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_transcoded_request( http_options, request @@ -3186,6 +3804,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_import_model_evaluation_slices(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_batch_import_model_evaluation_slices_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3303,7 
+3927,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3343,6 +3967,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_copy_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_copy_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3451,7 +4079,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3490,6 +4118,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3596,7 +4228,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3635,6 +4267,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_model_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_model_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3750,7 +4386,7 @@ def __call__( ) method = 
transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3790,6 +4426,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_export_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_export_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3934,6 +4574,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4082,6 +4726,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_model_evaluation(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_model_evaluation_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4234,6 +4882,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_model_evaluation_slice(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_model_evaluation_slice_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4390,6 +5042,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = 
self._interceptor.post_import_model_evaluation(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_import_model_evaluation_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4538,6 +5194,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_model_evaluations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_model_evaluations_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4690,6 +5350,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_model_evaluation_slices(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_model_evaluation_slices_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4842,6 +5506,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_models(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_models_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4867,6 +5535,166 @@ def __call__( ) return resp + class _ListModelVersionCheckpoints( + _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints, + ModelServiceRestStub, + ): + def __hash__(self): + return hash("ModelServiceRestTransport.ListModelVersionCheckpoints") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + 
transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: model_service.ListModelVersionCheckpointsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> model_service.ListModelVersionCheckpointsResponse: + r"""Call the list model version + checkpoints method over HTTP. + + Args: + request (~.model_service.ListModelVersionCheckpointsRequest): + The request object. Request message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + ~.model_service.ListModelVersionCheckpointsResponse: + Response message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints] + + """ + + http_options = ( + _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_model_version_checkpoints( + request, metadata + ) + transcoded_request = _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.ModelServiceClient.ListModelVersionCheckpoints", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "rpcName": "ListModelVersionCheckpoints", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + ModelServiceRestTransport._ListModelVersionCheckpoints._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_service.ListModelVersionCheckpointsResponse() + pb_resp = model_service.ListModelVersionCheckpointsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_model_version_checkpoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_list_model_version_checkpoints_with_metadata( + resp, response_metadata + ) + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + model_service.ListModelVersionCheckpointsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.ModelServiceClient.list_model_version_checkpoints", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "rpcName": "ListModelVersionCheckpoints", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _ListModelVersions( _BaseModelServiceRestTransport._BaseListModelVersions, ModelServiceRestStub ): @@ -4990,6 +5818,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_model_versions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_model_versions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5142,6 +5974,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_merge_version_aliases(resp) + 
response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_merge_version_aliases_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5255,7 +6091,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5297,6 +6133,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_explanation_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_explanation_dataset_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5451,6 +6291,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5566,7 +6410,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5606,6 +6450,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_upload_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_upload_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER 
@@ -5751,6 +6599,17 @@ def list_models( # In C++ this would require a dynamic_cast return self._ListModels(self._session, self._host, self._interceptor) # type: ignore + @property + def list_model_version_checkpoints( + self, + ) -> Callable[ + [model_service.ListModelVersionCheckpointsRequest], + model_service.ListModelVersionCheckpointsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListModelVersionCheckpoints(self._session, self._host, self._interceptor) # type: ignore + @property def list_model_versions( self, @@ -5837,7 +6696,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -5983,7 +6841,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -6125,7 +6982,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -6270,7 +7126,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -6421,7 +7276,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -6565,7 +7419,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -6681,7 +7534,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -6797,7 +7649,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -6941,7 +7792,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -7083,7 +7933,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py index a7fd2f169c..7725bf460b 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -86,6 +86,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncModelServiceRestInterceptor: """Asynchronous Interceptor for ModelService. @@ -206,6 +209,14 @@ async def post_list_models(self, response): logging.log(f"Received response: {response}") return response + async def pre_list_model_version_checkpoints(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + async def post_list_model_version_checkpoints(self, response): + logging.log(f"Received response: {response}") + return response + async def pre_list_model_versions(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -272,12 +283,38 @@ async def post_batch_import_evaluated_annotations( ) -> model_service.BatchImportEvaluatedAnnotationsResponse: """Post-rpc interceptor for batch_import_evaluated_annotations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_import_evaluated_annotations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_batch_import_evaluated_annotations` interceptor runs + before the `post_batch_import_evaluated_annotations_with_metadata` interceptor. 
""" return response + async def post_batch_import_evaluated_annotations_with_metadata( + self, + response: model_service.BatchImportEvaluatedAnnotationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.BatchImportEvaluatedAnnotationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for batch_import_evaluated_annotations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_batch_import_evaluated_annotations_with_metadata` + interceptor in new development instead of the `post_batch_import_evaluated_annotations` interceptor. + When both interceptors are used, this `post_batch_import_evaluated_annotations_with_metadata` interceptor runs after the + `post_batch_import_evaluated_annotations` interceptor. The (possibly modified) response returned by + `post_batch_import_evaluated_annotations` will be passed to + `post_batch_import_evaluated_annotations_with_metadata`. + """ + return response, metadata + async def pre_batch_import_model_evaluation_slices( self, request: model_service.BatchImportModelEvaluationSlicesRequest, @@ -298,12 +335,38 @@ async def post_batch_import_model_evaluation_slices( ) -> model_service.BatchImportModelEvaluationSlicesResponse: """Post-rpc interceptor for batch_import_model_evaluation_slices - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_import_model_evaluation_slices_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_batch_import_model_evaluation_slices` interceptor runs + before the `post_batch_import_model_evaluation_slices_with_metadata` interceptor. 
""" return response + async def post_batch_import_model_evaluation_slices_with_metadata( + self, + response: model_service.BatchImportModelEvaluationSlicesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.BatchImportModelEvaluationSlicesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for batch_import_model_evaluation_slices + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_batch_import_model_evaluation_slices_with_metadata` + interceptor in new development instead of the `post_batch_import_model_evaluation_slices` interceptor. + When both interceptors are used, this `post_batch_import_model_evaluation_slices_with_metadata` interceptor runs after the + `post_batch_import_model_evaluation_slices` interceptor. The (possibly modified) response returned by + `post_batch_import_model_evaluation_slices` will be passed to + `post_batch_import_model_evaluation_slices_with_metadata`. + """ + return response, metadata + async def pre_copy_model( self, request: model_service.CopyModelRequest, @@ -321,12 +384,35 @@ async def post_copy_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for copy_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_copy_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_copy_model` interceptor runs + before the `post_copy_model_with_metadata` interceptor. 
""" return response + async def post_copy_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for copy_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_copy_model_with_metadata` + interceptor in new development instead of the `post_copy_model` interceptor. + When both interceptors are used, this `post_copy_model_with_metadata` interceptor runs after the + `post_copy_model` interceptor. The (possibly modified) response returned by + `post_copy_model` will be passed to + `post_copy_model_with_metadata`. + """ + return response, metadata + async def pre_delete_model( self, request: model_service.DeleteModelRequest, @@ -346,12 +432,35 @@ async def post_delete_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_delete_model` interceptor runs + before the `post_delete_model_with_metadata` interceptor. """ return response + async def post_delete_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. 
+ + We recommend only using this `post_delete_model_with_metadata` + interceptor in new development instead of the `post_delete_model` interceptor. + When both interceptors are used, this `post_delete_model_with_metadata` interceptor runs after the + `post_delete_model` interceptor. The (possibly modified) response returned by + `post_delete_model` will be passed to + `post_delete_model_with_metadata`. + """ + return response, metadata + async def pre_delete_model_version( self, request: model_service.DeleteModelVersionRequest, @@ -371,12 +480,35 @@ async def post_delete_model_version( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_model_version - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_model_version_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_delete_model_version` interceptor runs + before the `post_delete_model_version_with_metadata` interceptor. """ return response + async def post_delete_model_version_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_model_version + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_delete_model_version_with_metadata` + interceptor in new development instead of the `post_delete_model_version` interceptor. + When both interceptors are used, this `post_delete_model_version_with_metadata` interceptor runs after the + `post_delete_model_version` interceptor. 
The (possibly modified) response returned by + `post_delete_model_version` will be passed to + `post_delete_model_version_with_metadata`. + """ + return response, metadata + async def pre_export_model( self, request: model_service.ExportModelRequest, @@ -396,12 +528,35 @@ async def post_export_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for export_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_export_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_export_model` interceptor runs + before the `post_export_model_with_metadata` interceptor. """ return response + async def post_export_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for export_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_export_model_with_metadata` + interceptor in new development instead of the `post_export_model` interceptor. + When both interceptors are used, this `post_export_model_with_metadata` interceptor runs after the + `post_export_model` interceptor. The (possibly modified) response returned by + `post_export_model` will be passed to + `post_export_model_with_metadata`. + """ + return response, metadata + async def pre_get_model( self, request: model_service.GetModelRequest, @@ -417,12 +572,33 @@ async def pre_get_model( async def post_get_model(self, response: model.Model) -> model.Model: """Post-rpc interceptor for get_model - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_get_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_get_model` interceptor runs + before the `post_get_model_with_metadata` interceptor. """ return response + async def post_get_model_with_metadata( + self, response: model.Model, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[model.Model, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_get_model_with_metadata` + interceptor in new development instead of the `post_get_model` interceptor. + When both interceptors are used, this `post_get_model_with_metadata` interceptor runs after the + `post_get_model` interceptor. The (possibly modified) response returned by + `post_get_model` will be passed to + `post_get_model_with_metadata`. + """ + return response, metadata + async def pre_get_model_evaluation( self, request: model_service.GetModelEvaluationRequest, @@ -442,12 +618,37 @@ async def post_get_model_evaluation( ) -> model_evaluation.ModelEvaluation: """Post-rpc interceptor for get_model_evaluation - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_model_evaluation_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_get_model_evaluation` interceptor runs + before the `post_get_model_evaluation_with_metadata` interceptor. 
""" return response + async def post_get_model_evaluation_with_metadata( + self, + response: model_evaluation.ModelEvaluation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_evaluation.ModelEvaluation, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_model_evaluation + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_get_model_evaluation_with_metadata` + interceptor in new development instead of the `post_get_model_evaluation` interceptor. + When both interceptors are used, this `post_get_model_evaluation_with_metadata` interceptor runs after the + `post_get_model_evaluation` interceptor. The (possibly modified) response returned by + `post_get_model_evaluation` will be passed to + `post_get_model_evaluation_with_metadata`. + """ + return response, metadata + async def pre_get_model_evaluation_slice( self, request: model_service.GetModelEvaluationSliceRequest, @@ -468,12 +669,38 @@ async def post_get_model_evaluation_slice( ) -> model_evaluation_slice.ModelEvaluationSlice: """Post-rpc interceptor for get_model_evaluation_slice - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_model_evaluation_slice_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_get_model_evaluation_slice` interceptor runs + before the `post_get_model_evaluation_slice_with_metadata` interceptor. 
""" return response + async def post_get_model_evaluation_slice_with_metadata( + self, + response: model_evaluation_slice.ModelEvaluationSlice, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_evaluation_slice.ModelEvaluationSlice, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_model_evaluation_slice + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_get_model_evaluation_slice_with_metadata` + interceptor in new development instead of the `post_get_model_evaluation_slice` interceptor. + When both interceptors are used, this `post_get_model_evaluation_slice_with_metadata` interceptor runs after the + `post_get_model_evaluation_slice` interceptor. The (possibly modified) response returned by + `post_get_model_evaluation_slice` will be passed to + `post_get_model_evaluation_slice_with_metadata`. + """ + return response, metadata + async def pre_import_model_evaluation( self, request: model_service.ImportModelEvaluationRequest, @@ -494,12 +721,37 @@ async def post_import_model_evaluation( ) -> gca_model_evaluation.ModelEvaluation: """Post-rpc interceptor for import_model_evaluation - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_import_model_evaluation_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_import_model_evaluation` interceptor runs + before the `post_import_model_evaluation_with_metadata` interceptor. 
""" return response + async def post_import_model_evaluation_with_metadata( + self, + response: gca_model_evaluation.ModelEvaluation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_model_evaluation.ModelEvaluation, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for import_model_evaluation + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_import_model_evaluation_with_metadata` + interceptor in new development instead of the `post_import_model_evaluation` interceptor. + When both interceptors are used, this `post_import_model_evaluation_with_metadata` interceptor runs after the + `post_import_model_evaluation` interceptor. The (possibly modified) response returned by + `post_import_model_evaluation` will be passed to + `post_import_model_evaluation_with_metadata`. + """ + return response, metadata + async def pre_list_model_evaluations( self, request: model_service.ListModelEvaluationsRequest, @@ -520,12 +772,38 @@ async def post_list_model_evaluations( ) -> model_service.ListModelEvaluationsResponse: """Post-rpc interceptor for list_model_evaluations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_evaluations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_evaluations` interceptor runs + before the `post_list_model_evaluations_with_metadata` interceptor. 
""" return response + async def post_list_model_evaluations_with_metadata( + self, + response: model_service.ListModelEvaluationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelEvaluationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_evaluations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_evaluations_with_metadata` + interceptor in new development instead of the `post_list_model_evaluations` interceptor. + When both interceptors are used, this `post_list_model_evaluations_with_metadata` interceptor runs after the + `post_list_model_evaluations` interceptor. The (possibly modified) response returned by + `post_list_model_evaluations` will be passed to + `post_list_model_evaluations_with_metadata`. + """ + return response, metadata + async def pre_list_model_evaluation_slices( self, request: model_service.ListModelEvaluationSlicesRequest, @@ -546,12 +824,38 @@ async def post_list_model_evaluation_slices( ) -> model_service.ListModelEvaluationSlicesResponse: """Post-rpc interceptor for list_model_evaluation_slices - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_evaluation_slices_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_evaluation_slices` interceptor runs + before the `post_list_model_evaluation_slices_with_metadata` interceptor. 
""" return response + async def post_list_model_evaluation_slices_with_metadata( + self, + response: model_service.ListModelEvaluationSlicesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelEvaluationSlicesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_evaluation_slices + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_evaluation_slices_with_metadata` + interceptor in new development instead of the `post_list_model_evaluation_slices` interceptor. + When both interceptors are used, this `post_list_model_evaluation_slices_with_metadata` interceptor runs after the + `post_list_model_evaluation_slices` interceptor. The (possibly modified) response returned by + `post_list_model_evaluation_slices` will be passed to + `post_list_model_evaluation_slices_with_metadata`. + """ + return response, metadata + async def pre_list_models( self, request: model_service.ListModelsRequest, @@ -571,12 +875,89 @@ async def post_list_models( ) -> model_service.ListModelsResponse: """Post-rpc interceptor for list_models - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_models_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_models` interceptor runs + before the `post_list_models_with_metadata` interceptor. 
""" return response + async def post_list_models_with_metadata( + self, + response: model_service.ListModelsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_models + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_models_with_metadata` + interceptor in new development instead of the `post_list_models` interceptor. + When both interceptors are used, this `post_list_models_with_metadata` interceptor runs after the + `post_list_models` interceptor. The (possibly modified) response returned by + `post_list_models` will be passed to + `post_list_models_with_metadata`. + """ + return response, metadata + + async def pre_list_model_version_checkpoints( + self, + request: model_service.ListModelVersionCheckpointsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelVersionCheckpointsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_model_version_checkpoints + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelService server. + """ + return request, metadata + + async def post_list_model_version_checkpoints( + self, response: model_service.ListModelVersionCheckpointsResponse + ) -> model_service.ListModelVersionCheckpointsResponse: + """Post-rpc interceptor for list_model_version_checkpoints + + DEPRECATED. Please use the `post_list_model_version_checkpoints_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the ModelService server but before + it is returned to user code. 
This `post_list_model_version_checkpoints` interceptor runs + before the `post_list_model_version_checkpoints_with_metadata` interceptor. + """ + return response + + async def post_list_model_version_checkpoints_with_metadata( + self, + response: model_service.ListModelVersionCheckpointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelVersionCheckpointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_model_version_checkpoints + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_version_checkpoints_with_metadata` + interceptor in new development instead of the `post_list_model_version_checkpoints` interceptor. + When both interceptors are used, this `post_list_model_version_checkpoints_with_metadata` interceptor runs after the + `post_list_model_version_checkpoints` interceptor. The (possibly modified) response returned by + `post_list_model_version_checkpoints` will be passed to + `post_list_model_version_checkpoints_with_metadata`. + """ + return response, metadata + async def pre_list_model_versions( self, request: model_service.ListModelVersionsRequest, @@ -596,12 +977,37 @@ async def post_list_model_versions( ) -> model_service.ListModelVersionsResponse: """Post-rpc interceptor for list_model_versions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_model_versions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_list_model_versions` interceptor runs + before the `post_list_model_versions_with_metadata` interceptor. 
""" return response + async def post_list_model_versions_with_metadata( + self, + response: model_service.ListModelVersionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + model_service.ListModelVersionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_model_versions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_list_model_versions_with_metadata` + interceptor in new development instead of the `post_list_model_versions` interceptor. + When both interceptors are used, this `post_list_model_versions_with_metadata` interceptor runs after the + `post_list_model_versions` interceptor. The (possibly modified) response returned by + `post_list_model_versions` will be passed to + `post_list_model_versions_with_metadata`. + """ + return response, metadata + async def pre_merge_version_aliases( self, request: model_service.MergeVersionAliasesRequest, @@ -620,12 +1026,33 @@ async def pre_merge_version_aliases( async def post_merge_version_aliases(self, response: model.Model) -> model.Model: """Post-rpc interceptor for merge_version_aliases - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_merge_version_aliases_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_merge_version_aliases` interceptor runs + before the `post_merge_version_aliases_with_metadata` interceptor. 
""" return response + async def post_merge_version_aliases_with_metadata( + self, response: model.Model, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[model.Model, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for merge_version_aliases + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_merge_version_aliases_with_metadata` + interceptor in new development instead of the `post_merge_version_aliases` interceptor. + When both interceptors are used, this `post_merge_version_aliases_with_metadata` interceptor runs after the + `post_merge_version_aliases` interceptor. The (possibly modified) response returned by + `post_merge_version_aliases` will be passed to + `post_merge_version_aliases_with_metadata`. + """ + return response, metadata + async def pre_update_explanation_dataset( self, request: model_service.UpdateExplanationDatasetRequest, @@ -646,12 +1073,35 @@ async def post_update_explanation_dataset( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_explanation_dataset - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_explanation_dataset_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_update_explanation_dataset` interceptor runs + before the `post_update_explanation_dataset_with_metadata` interceptor. 
""" return response + async def post_update_explanation_dataset_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_explanation_dataset + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_update_explanation_dataset_with_metadata` + interceptor in new development instead of the `post_update_explanation_dataset` interceptor. + When both interceptors are used, this `post_update_explanation_dataset_with_metadata` interceptor runs after the + `post_update_explanation_dataset` interceptor. The (possibly modified) response returned by + `post_update_explanation_dataset` will be passed to + `post_update_explanation_dataset_with_metadata`. + """ + return response, metadata + async def pre_update_model( self, request: model_service.UpdateModelRequest, @@ -669,12 +1119,35 @@ async def pre_update_model( async def post_update_model(self, response: gca_model.Model) -> gca_model.Model: """Post-rpc interceptor for update_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_update_model` interceptor runs + before the `post_update_model_with_metadata` interceptor. 
""" return response + async def post_update_model_with_metadata( + self, + response: gca_model.Model, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_model.Model, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. + + We recommend only using this `post_update_model_with_metadata` + interceptor in new development instead of the `post_update_model` interceptor. + When both interceptors are used, this `post_update_model_with_metadata` interceptor runs after the + `post_update_model` interceptor. The (possibly modified) response returned by + `post_update_model` will be passed to + `post_update_model_with_metadata`. + """ + return response, metadata + async def pre_upload_model( self, request: model_service.UploadModelRequest, @@ -694,12 +1167,35 @@ async def post_upload_model( ) -> operations_pb2.Operation: """Post-rpc interceptor for upload_model - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_upload_model_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the ModelService server but before - it is returned to user code. + it is returned to user code. This `post_upload_model` interceptor runs + before the `post_upload_model_with_metadata` interceptor. """ return response + async def post_upload_model_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for upload_model + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the ModelService server but before it is returned to user code. 
+ + We recommend only using this `post_upload_model_with_metadata` + interceptor in new development instead of the `post_upload_model` interceptor. + When both interceptors are used, this `post_upload_model_with_metadata` interceptor runs after the + `post_upload_model` interceptor. The (possibly modified) response returned by + `post_upload_model` will be passed to + `post_upload_model_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -1008,9 +1504,9 @@ def __init__( self._interceptor = interceptor or AsyncModelServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1035,6 +1531,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.list_model_version_checkpoints: self._wrap_method( + self.list_model_version_checkpoints, + default_timeout=None, + client_info=client_info, + ), self.update_model: self._wrap_method( self.update_model, default_timeout=None, @@ -1229,11 +1730,10 @@ async def __call__( _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_import_evaluated_annotations( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_import_evaluated_annotations( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportEvaluatedAnnotations._get_transcoded_request( http_options, request @@ -1303,6 +1803,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, 
ignore_unknown_fields=True) resp = await self._interceptor.post_batch_import_evaluated_annotations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_batch_import_evaluated_annotations_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1398,11 +1904,10 @@ async def __call__( _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_batch_import_model_evaluation_slices( - request, metadata + request, metadata = ( + await self._interceptor.pre_batch_import_model_evaluation_slices( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseBatchImportModelEvaluationSlices._get_transcoded_request( http_options, request @@ -1474,6 +1979,12 @@ async def __call__( resp = await self._interceptor.post_batch_import_model_evaluation_slices( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_batch_import_model_evaluation_slices_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1594,7 +2105,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1641,6 +2152,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_copy_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_copy_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # 
pragma: NO COVER @@ -1752,7 +2267,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1798,6 +2313,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1906,7 +2425,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1954,6 +2473,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_model_version(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_model_version_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2072,7 +2595,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2119,6 +2642,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_export_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await 
self._interceptor.post_export_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2271,6 +2798,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2428,6 +2959,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_model_evaluation(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_model_evaluation_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2586,6 +3121,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_model_evaluation_slice(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_model_evaluation_slice_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2749,6 +3290,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_import_model_evaluation(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_import_model_evaluation_with_metadata( + resp, response_metadata + ) + ) if 
CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2904,6 +3451,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_model_evaluations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_model_evaluations_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2994,11 +3545,10 @@ async def __call__( _BaseModelServiceRestTransport._BaseListModelEvaluationSlices._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_model_evaluation_slices( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_model_evaluation_slices( + request, metadata + ) ) transcoded_request = _BaseModelServiceRestTransport._BaseListModelEvaluationSlices._get_transcoded_request( http_options, request @@ -3063,6 +3613,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_model_evaluation_slices(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_model_evaluation_slices_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3223,6 +3779,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_models(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_models_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ 
-3249,6 +3809,172 @@ async def __call__( return resp + class _ListModelVersionCheckpoints( + _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints, + AsyncModelServiceRestStub, + ): + def __hash__(self): + return hash("AsyncModelServiceRestTransport.ListModelVersionCheckpoints") + + @staticmethod + async def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = await getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + async def __call__( + self, + request: model_service.ListModelVersionCheckpointsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> model_service.ListModelVersionCheckpointsResponse: + r"""Call the list model version + checkpoints method over HTTP. + + Args: + request (~.model_service.ListModelVersionCheckpointsRequest): + The request object. Request message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints]. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + ~.model_service.ListModelVersionCheckpointsResponse: + Response message for + [ModelService.ListModelVersionCheckpoints][google.cloud.aiplatform.v1.ModelService.ListModelVersionCheckpoints] + + """ + + http_options = ( + _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_http_options() + ) + + request, metadata = ( + await self._interceptor.pre_list_model_version_checkpoints( + request, metadata + ) + ) + transcoded_request = _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.cloud.aiplatform_v1.ModelServiceClient.ListModelVersionCheckpoints", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "rpcName": "ListModelVersionCheckpoints", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = await AsyncModelServiceRestTransport._ListModelVersionCheckpoints._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + content = await response.read() + payload = json.loads(content.decode("utf-8")) + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + raise core_exceptions.format_http_response_error(response, method, request_url, payload) # type: ignore + + # Return the response + resp = model_service.ListModelVersionCheckpointsResponse() + pb_resp = model_service.ListModelVersionCheckpointsResponse.pb(resp) + content = await response.read() + json_format.Parse(content, pb_resp, ignore_unknown_fields=True) + resp = await self._interceptor.post_list_model_version_checkpoints(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_model_version_checkpoints_with_metadata( + resp, response_metadata + ) + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + model_service.ListModelVersionCheckpointsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": "OK", # need to obtain this properly + } + _LOGGER.debug( + "Received response for google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_version_checkpoints", + extra={ + "serviceName": "google.cloud.aiplatform.v1.ModelService", + "rpcName": "ListModelVersionCheckpoints", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + + return resp + class _ListModelVersions( _BaseModelServiceRestTransport._BaseListModelVersions, AsyncModelServiceRestStub ): @@ -3379,6 +4105,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_model_versions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, 
_ = await self._interceptor.post_list_model_versions_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3540,6 +4270,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_merge_version_aliases(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_merge_version_aliases_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3654,7 +4388,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3701,6 +4435,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_explanation_dataset(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_explanation_dataset_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3863,6 +4603,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_update_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3981,7 +4725,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + 
request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4028,6 +4772,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_upload_model(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_upload_model_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4223,6 +4971,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -4359,6 +5111,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -4367,6 +5123,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4407,6 +5171,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -4569,6 +5337,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -4597,6 +5369,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -4709,6 +5485,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -4717,6 +5497,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4777,6 +5565,10 @@ def 
operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4939,6 +5731,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -4979,6 +5775,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -5091,6 +5891,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -5099,6 +5903,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5159,6 +5971,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -5317,6 +6133,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -5357,6 +6177,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -5469,6 +6293,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -5489,6 +6321,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -5537,6 +6373,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -5703,6 +6543,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -5735,6 +6579,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -5843,6 +6691,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -5851,6 +6703,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5911,6 +6771,10 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -6025,6 +6889,15 @@ def list_models( ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]: return self._ListModels(self._session, self._host, self._interceptor) # type: ignore + @property + def list_model_version_checkpoints( + self, + ) -> Callable[ + [model_service.ListModelVersionCheckpointsRequest], + model_service.ListModelVersionCheckpointsResponse, + ]: + return self._ListModelVersionCheckpoints(self._session, self._host, self._interceptor) # type: ignore + @property def list_model_versions( self, @@ -6101,7 +6974,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -6255,7 +7127,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -6407,7 +7278,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -6560,7 +7430,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -6720,7 +7589,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. 
Args: @@ -6872,7 +7740,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -6996,7 +7863,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -7120,7 +7986,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -7272,7 +8137,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -7424,7 +8288,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py index ec3789a942..5bc633e237 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -763,6 +763,53 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseListModelVersionCheckpoints: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}:listCheckpoints", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = model_service.ListModelVersionCheckpointsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseModelServiceRestTransport._BaseListModelVersionCheckpoints._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseListModelVersions: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1161,6 +1208,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -1247,6 +1298,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1325,6 +1381,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1506,6 +1566,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1642,6 +1706,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1650,6 +1718,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1690,6 +1766,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1871,6 +1951,10 @@ def 
_get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1899,6 +1983,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -2011,6 +2099,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2019,6 +2111,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2079,6 +2179,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2260,6 +2364,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2300,6 +2408,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2412,6 +2524,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2420,6 +2536,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2480,6 +2604,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2657,6 +2785,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2697,6 +2829,10 @@ def _get_http_options(): "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2809,6 +2945,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2829,6 +2973,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2877,6 +3025,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -3062,6 +3214,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3094,6 +3250,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", 
"uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3202,6 +3362,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3210,6 +3374,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3270,6 +3442,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/notebook_service/__init__.py b/google/cloud/aiplatform_v1/services/notebook_service/__init__.py index 4e038695c2..2cb87d3354 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py index 646941b048..d165cc15b0 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.notebook_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import job_state @@ -61,16 +60,19 @@ from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.aiplatform_v1.types import notebook_software_config from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf 
import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport from .client import NotebookServiceClient @@ -161,7 +163,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: NotebookServiceAsyncClient: The constructed client. """ - return NotebookServiceClient.from_service_account_info.__func__(NotebookServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + NotebookServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(NotebookServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -177,7 +182,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: NotebookServiceAsyncClient: The constructed client. 
""" - return NotebookServiceClient.from_service_account_file.__func__(NotebookServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + NotebookServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(NotebookServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -320,21 +328,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.NotebookServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.NotebookService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.NotebookService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.NotebookService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.NotebookService", + "credentialsType": None, + } + ), ) async def create_notebook_runtime_template( @@ -435,8 +445,13 @@ async def sample_create_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any( - [parent, notebook_runtime_template, notebook_runtime_template_id] + flattened_params = [ + parent, + notebook_runtime_template, + notebook_runtime_template_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -565,7 +580,10 @@ async def sample_get_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -680,7 +698,10 @@ async def sample_list_notebook_runtime_templates(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -818,7 +839,10 @@ async def sample_delete_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -934,7 +958,7 @@ async def sample_update_notebook_runtime_template(): Input format: ``{paths: "${updated_filed}"}`` Updatable fields: - - ``encryption_spec.kms_key_name`` + - ``encryption_spec.kms_key_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -959,7 +983,10 @@ async def sample_update_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([notebook_runtime_template, update_mask]) + flattened_params = [notebook_runtime_template, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1118,14 +1145,22 @@ async def sample_assign_notebook_runtime(): The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.NotebookRuntime` A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary basis with - lifetime limited to 24 hours. + lifetime. Default runtimes have a lifetime of 18 + hours, while custom runtimes last for 6 months from + their creation or last upgrade. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any( - [parent, notebook_runtime_template, notebook_runtime, notebook_runtime_id] + flattened_params = [ + parent, + notebook_runtime_template, + notebook_runtime, + notebook_runtime_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1251,13 +1286,19 @@ async def sample_get_notebook_runtime(): A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary - basis with lifetime limited to 24 hours. + basis with lifetime. Default runtimes + have a lifetime of 18 hours, while + custom runtimes last for 6 months from + their creation or last upgrade. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1372,7 +1413,10 @@ async def sample_list_notebook_runtimes(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1512,7 +1556,10 @@ async def sample_delete_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1641,7 +1688,10 @@ async def sample_upgrade_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1770,7 +1820,10 @@ async def sample_start_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1899,7 +1952,10 @@ async def sample_stop_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2050,8 +2106,9 @@ async def sample_create_notebook_execution_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, notebook_execution_job, notebook_execution_job_id] + flattened_params = [parent, notebook_execution_job, notebook_execution_job_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -2174,7 +2231,10 @@ async def sample_get_notebook_execution_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2289,7 +2349,10 @@ async def sample_list_notebook_execution_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2425,7 +2488,10 @@ async def sample_delete_notebook_execution_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3184,5 +3250,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("NotebookServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/client.py b/google/cloud/aiplatform_v1/services/notebook_service/client.py index 32e26cd961..3ea2d02333 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/client.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.notebook_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import job_state @@ -75,16 +76,19 @@ from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.aiplatform_v1.types import notebook_software_config from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.duration_pb2 as duration_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore 
+import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import NotebookServiceGrpcTransport from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport @@ -186,6 +190,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -502,12 +534,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = NotebookServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -515,7 +543,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -547,20 +575,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. 
""" - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = NotebookServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -653,6 +675,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. 
@@ -743,11 +792,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = NotebookServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + NotebookServiceClient._read_environment_variables() + ) self._client_cert_source = NotebookServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -859,21 +906,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.NotebookServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.NotebookService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.NotebookService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.NotebookService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.NotebookService", + "credentialsType": None, + } + ), ) def create_notebook_runtime_template( @@ -974,8 +1025,13 @@ def sample_create_notebook_runtime_template(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, notebook_runtime_template, notebook_runtime_template_id] + flattened_params = [ + parent, + notebook_runtime_template, + notebook_runtime_template_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1103,7 +1159,10 @@ def sample_get_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1217,7 +1276,10 @@ def sample_list_notebook_runtime_templates(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1354,7 +1416,10 @@ def sample_delete_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1469,7 +1534,7 @@ def sample_update_notebook_runtime_template(): Input format: ``{paths: "${updated_filed}"}`` Updatable fields: - - ``encryption_spec.kms_key_name`` + - ``encryption_spec.kms_key_name`` This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this @@ -1494,7 +1559,10 @@ def sample_update_notebook_runtime_template(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([notebook_runtime_template, update_mask]) + flattened_params = [notebook_runtime_template, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1652,14 +1720,22 @@ def sample_assign_notebook_runtime(): The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.NotebookRuntime` A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary basis with - lifetime limited to 24 hours. + lifetime. Default runtimes have a lifetime of 18 + hours, while custom runtimes last for 6 months from + their creation or last upgrade. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any( - [parent, notebook_runtime_template, notebook_runtime, notebook_runtime_id] + flattened_params = [ + parent, + notebook_runtime_template, + notebook_runtime, + notebook_runtime_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1782,13 +1858,19 @@ def sample_get_notebook_runtime(): A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary - basis with lifetime limited to 24 hours. + basis with lifetime. Default runtimes + have a lifetime of 18 hours, while + custom runtimes last for 6 months from + their creation or last upgrade. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1900,7 +1982,10 @@ def sample_list_notebook_runtimes(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2037,7 +2122,10 @@ def sample_delete_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2163,7 +2251,10 @@ def sample_upgrade_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2289,7 +2380,10 @@ def sample_start_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2415,7 +2509,10 @@ def sample_stop_notebook_runtime(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2563,8 +2660,9 @@ def sample_create_notebook_execution_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, notebook_execution_job, notebook_execution_job_id] + flattened_params = [parent, notebook_execution_job, notebook_execution_job_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -2686,7 +2784,10 @@ def sample_get_notebook_execution_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2800,7 +2901,10 @@ def sample_list_notebook_execution_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2935,7 +3039,10 @@ def sample_delete_notebook_execution_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3042,16 +3149,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -3097,16 +3208,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -3269,16 +3384,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -3390,16 +3509,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -3512,16 +3635,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -3572,16 +3699,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -3627,16 +3758,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -3682,21 +3817,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("NotebookServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/pagers.py b/google/cloud/aiplatform_v1/services/notebook_service/pagers.py index 58c5af8144..8e187ee9ea 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py index 956805dac0..e7b59a5466 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py index 8fdd1b6e2c..2816915af9 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import notebook_execution_job from google.cloud.aiplatform_v1.types import notebook_runtime @@ -39,6 +40,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class NotebookServiceTransport(abc.ABC): """Abstract transport class for NotebookService.""" @@ -70,9 +74,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -85,8 +90,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. 
self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -101,11 +104,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -471,13 +479,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py index af558126f4..f0f291acdc 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -77,12 +77,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.NotebookService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -160,9 +159,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -296,9 +296,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. 
These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -369,12 +370,12 @@ def create_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_notebook_runtime_template" not in self._stubs: - self._stubs[ - "create_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookRuntimeTemplate", - request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookRuntimeTemplate", + request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_notebook_runtime_template"] @@ -400,12 +401,12 @@ def get_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_notebook_runtime_template" not in self._stubs: - self._stubs[ - "get_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntimeTemplate", - request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, - response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + self._stubs["get_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntimeTemplate", + request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) ) return self._stubs["get_notebook_runtime_template"] @@ -432,12 +433,12 @@ def list_notebook_runtime_templates( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_notebook_runtime_templates" not in self._stubs: - self._stubs[ - "list_notebook_runtime_templates" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimeTemplates", - request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, - response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + self._stubs["list_notebook_runtime_templates"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimeTemplates", + request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + ) ) return self._stubs["list_notebook_runtime_templates"] @@ -464,12 +465,12 @@ def delete_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_notebook_runtime_template" not in self._stubs: - self._stubs[ - "delete_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntimeTemplate", - request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntimeTemplate", + request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_notebook_runtime_template"] @@ -496,12 +497,12 @@ def update_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_notebook_runtime_template" not in self._stubs: - self._stubs[ - "update_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/UpdateNotebookRuntimeTemplate", - request_serializer=notebook_service.UpdateNotebookRuntimeTemplateRequest.serialize, - response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + self._stubs["update_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/UpdateNotebookRuntimeTemplate", + request_serializer=notebook_service.UpdateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) ) return self._stubs["update_notebook_runtime_template"] @@ -725,12 +726,12 @@ def create_notebook_execution_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_notebook_execution_job" not in self._stubs: - self._stubs[ - "create_notebook_execution_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookExecutionJob", - request_serializer=notebook_service.CreateNotebookExecutionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_notebook_execution_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookExecutionJob", + request_serializer=notebook_service.CreateNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_notebook_execution_job"] @@ -756,12 +757,12 @@ def get_notebook_execution_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_notebook_execution_job" not in self._stubs: - self._stubs[ - "get_notebook_execution_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/GetNotebookExecutionJob", - request_serializer=notebook_service.GetNotebookExecutionJobRequest.serialize, - response_deserializer=notebook_execution_job.NotebookExecutionJob.deserialize, + self._stubs["get_notebook_execution_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookExecutionJob", + request_serializer=notebook_service.GetNotebookExecutionJobRequest.serialize, + response_deserializer=notebook_execution_job.NotebookExecutionJob.deserialize, + ) ) return self._stubs["get_notebook_execution_job"] @@ -787,12 +788,12 @@ def list_notebook_execution_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_notebook_execution_jobs" not in self._stubs: - self._stubs[ - "list_notebook_execution_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/ListNotebookExecutionJobs", - request_serializer=notebook_service.ListNotebookExecutionJobsRequest.serialize, - response_deserializer=notebook_service.ListNotebookExecutionJobsResponse.deserialize, + self._stubs["list_notebook_execution_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookExecutionJobs", + request_serializer=notebook_service.ListNotebookExecutionJobsRequest.serialize, + response_deserializer=notebook_service.ListNotebookExecutionJobsResponse.deserialize, + ) ) return self._stubs["list_notebook_execution_jobs"] @@ -817,12 +818,12 @@ def delete_notebook_execution_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_notebook_execution_job" not in self._stubs: - self._stubs[ - "delete_notebook_execution_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookExecutionJob", - request_serializer=notebook_service.DeleteNotebookExecutionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_notebook_execution_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookExecutionJob", + request_serializer=notebook_service.DeleteNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_notebook_execution_job"] diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py index e6c232e5fa..05fc061f1b 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py +++ 
b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -156,8 +156,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -208,9 +209,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -376,12 +378,12 @@ def create_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_notebook_runtime_template" not in self._stubs: - self._stubs[ - "create_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookRuntimeTemplate", - request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookRuntimeTemplate", + request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_notebook_runtime_template"] @@ -407,12 +409,12 @@ def get_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_notebook_runtime_template" not in self._stubs: - self._stubs[ - "get_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntimeTemplate", - request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, - response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + self._stubs["get_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntimeTemplate", + request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) ) return self._stubs["get_notebook_runtime_template"] @@ -439,12 +441,12 @@ def list_notebook_runtime_templates( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_notebook_runtime_templates" not in self._stubs: - self._stubs[ - "list_notebook_runtime_templates" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimeTemplates", - request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, - response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + self._stubs["list_notebook_runtime_templates"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimeTemplates", + request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + ) ) return self._stubs["list_notebook_runtime_templates"] @@ -471,12 +473,12 @@ def delete_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_notebook_runtime_template" not in self._stubs: - self._stubs[ - "delete_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntimeTemplate", - request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntimeTemplate", + request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_notebook_runtime_template"] @@ -503,12 +505,12 @@ def update_notebook_runtime_template( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_notebook_runtime_template" not in self._stubs: - self._stubs[ - "update_notebook_runtime_template" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/UpdateNotebookRuntimeTemplate", - request_serializer=notebook_service.UpdateNotebookRuntimeTemplateRequest.serialize, - response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + self._stubs["update_notebook_runtime_template"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/UpdateNotebookRuntimeTemplate", + request_serializer=notebook_service.UpdateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) ) return self._stubs["update_notebook_runtime_template"] @@ -739,12 +741,12 @@ def create_notebook_execution_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_notebook_execution_job" not in self._stubs: - self._stubs[ - "create_notebook_execution_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookExecutionJob", - request_serializer=notebook_service.CreateNotebookExecutionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_notebook_execution_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookExecutionJob", + request_serializer=notebook_service.CreateNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_notebook_execution_job"] @@ -770,12 +772,12 @@ def get_notebook_execution_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_notebook_execution_job" not in self._stubs: - self._stubs[ - "get_notebook_execution_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/GetNotebookExecutionJob", - request_serializer=notebook_service.GetNotebookExecutionJobRequest.serialize, - response_deserializer=notebook_execution_job.NotebookExecutionJob.deserialize, + self._stubs["get_notebook_execution_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookExecutionJob", + request_serializer=notebook_service.GetNotebookExecutionJobRequest.serialize, + response_deserializer=notebook_execution_job.NotebookExecutionJob.deserialize, + ) ) return self._stubs["get_notebook_execution_job"] @@ -801,12 +803,12 @@ def list_notebook_execution_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_notebook_execution_jobs" not in self._stubs: - self._stubs[ - "list_notebook_execution_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/ListNotebookExecutionJobs", - request_serializer=notebook_service.ListNotebookExecutionJobsRequest.serialize, - response_deserializer=notebook_service.ListNotebookExecutionJobsResponse.deserialize, + self._stubs["list_notebook_execution_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookExecutionJobs", + request_serializer=notebook_service.ListNotebookExecutionJobsRequest.serialize, + response_deserializer=notebook_service.ListNotebookExecutionJobsResponse.deserialize, + ) ) return self._stubs["list_notebook_execution_jobs"] @@ -832,12 +834,12 @@ def delete_notebook_execution_job( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_notebook_execution_job" not in self._stubs: - self._stubs[ - "delete_notebook_execution_job" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookExecutionJob", - request_serializer=notebook_service.DeleteNotebookExecutionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_notebook_execution_job"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookExecutionJob", + request_serializer=notebook_service.DeleteNotebookExecutionJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_notebook_execution_job"] diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py index 4fcbed978f..33befc8bea 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -65,6 +66,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class NotebookServiceRestInterceptor: """Interceptor for NotebookService. 
@@ -235,12 +239,35 @@ def post_assign_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for assign_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_assign_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_assign_notebook_runtime` interceptor runs + before the `post_assign_notebook_runtime_with_metadata` interceptor. """ return response + def post_assign_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for assign_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_assign_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_assign_notebook_runtime` interceptor. + When both interceptors are used, this `post_assign_notebook_runtime_with_metadata` interceptor runs after the + `post_assign_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_assign_notebook_runtime` will be passed to + `post_assign_notebook_runtime_with_metadata`. + """ + return response, metadata + def pre_create_notebook_execution_job( self, request: notebook_service.CreateNotebookExecutionJobRequest, @@ -261,12 +288,35 @@ def post_create_notebook_execution_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_notebook_execution_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_notebook_execution_job_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_create_notebook_execution_job` interceptor runs + before the `post_create_notebook_execution_job_with_metadata` interceptor. """ return response + def post_create_notebook_execution_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_notebook_execution_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_create_notebook_execution_job_with_metadata` + interceptor in new development instead of the `post_create_notebook_execution_job` interceptor. + When both interceptors are used, this `post_create_notebook_execution_job_with_metadata` interceptor runs after the + `post_create_notebook_execution_job` interceptor. The (possibly modified) response returned by + `post_create_notebook_execution_job` will be passed to + `post_create_notebook_execution_job_with_metadata`. + """ + return response, metadata + def pre_create_notebook_runtime_template( self, request: notebook_service.CreateNotebookRuntimeTemplateRequest, @@ -287,12 +337,35 @@ def post_create_notebook_runtime_template( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. 
This `post_create_notebook_runtime_template` interceptor runs + before the `post_create_notebook_runtime_template_with_metadata` interceptor. """ return response + def post_create_notebook_runtime_template_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_create_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_create_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_create_notebook_runtime_template_with_metadata` interceptor runs after the + `post_create_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_create_notebook_runtime_template` will be passed to + `post_create_notebook_runtime_template_with_metadata`. + """ + return response, metadata + def pre_delete_notebook_execution_job( self, request: notebook_service.DeleteNotebookExecutionJobRequest, @@ -313,12 +386,35 @@ def post_delete_notebook_execution_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_notebook_execution_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_notebook_execution_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_delete_notebook_execution_job` interceptor runs + before the `post_delete_notebook_execution_job_with_metadata` interceptor. 
""" return response + def post_delete_notebook_execution_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_notebook_execution_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_delete_notebook_execution_job_with_metadata` + interceptor in new development instead of the `post_delete_notebook_execution_job` interceptor. + When both interceptors are used, this `post_delete_notebook_execution_job_with_metadata` interceptor runs after the + `post_delete_notebook_execution_job` interceptor. The (possibly modified) response returned by + `post_delete_notebook_execution_job` will be passed to + `post_delete_notebook_execution_job_with_metadata`. + """ + return response, metadata + def pre_delete_notebook_runtime( self, request: notebook_service.DeleteNotebookRuntimeRequest, @@ -339,12 +435,35 @@ def post_delete_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_delete_notebook_runtime` interceptor runs + before the `post_delete_notebook_runtime_with_metadata` interceptor. 
""" return response + def post_delete_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_delete_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_delete_notebook_runtime` interceptor. + When both interceptors are used, this `post_delete_notebook_runtime_with_metadata` interceptor runs after the + `post_delete_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_delete_notebook_runtime` will be passed to + `post_delete_notebook_runtime_with_metadata`. + """ + return response, metadata + def pre_delete_notebook_runtime_template( self, request: notebook_service.DeleteNotebookRuntimeTemplateRequest, @@ -365,12 +484,35 @@ def post_delete_notebook_runtime_template( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_delete_notebook_runtime_template` interceptor runs + before the `post_delete_notebook_runtime_template_with_metadata` interceptor. 
""" return response + def post_delete_notebook_runtime_template_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_delete_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_delete_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_delete_notebook_runtime_template_with_metadata` interceptor runs after the + `post_delete_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_delete_notebook_runtime_template` will be passed to + `post_delete_notebook_runtime_template_with_metadata`. + """ + return response, metadata + def pre_get_notebook_execution_job( self, request: notebook_service.GetNotebookExecutionJobRequest, @@ -391,12 +533,38 @@ def post_get_notebook_execution_job( ) -> notebook_execution_job.NotebookExecutionJob: """Post-rpc interceptor for get_notebook_execution_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_notebook_execution_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_get_notebook_execution_job` interceptor runs + before the `post_get_notebook_execution_job_with_metadata` interceptor. 
""" return response + def post_get_notebook_execution_job_with_metadata( + self, + response: notebook_execution_job.NotebookExecutionJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_execution_job.NotebookExecutionJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_notebook_execution_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_get_notebook_execution_job_with_metadata` + interceptor in new development instead of the `post_get_notebook_execution_job` interceptor. + When both interceptors are used, this `post_get_notebook_execution_job_with_metadata` interceptor runs after the + `post_get_notebook_execution_job` interceptor. The (possibly modified) response returned by + `post_get_notebook_execution_job` will be passed to + `post_get_notebook_execution_job_with_metadata`. + """ + return response, metadata + def pre_get_notebook_runtime( self, request: notebook_service.GetNotebookRuntimeRequest, @@ -417,12 +585,37 @@ def post_get_notebook_runtime( ) -> notebook_runtime.NotebookRuntime: """Post-rpc interceptor for get_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_get_notebook_runtime` interceptor runs + before the `post_get_notebook_runtime_with_metadata` interceptor. 
""" return response + def post_get_notebook_runtime_with_metadata( + self, + response: notebook_runtime.NotebookRuntime, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_runtime.NotebookRuntime, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_get_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_get_notebook_runtime` interceptor. + When both interceptors are used, this `post_get_notebook_runtime_with_metadata` interceptor runs after the + `post_get_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_get_notebook_runtime` will be passed to + `post_get_notebook_runtime_with_metadata`. + """ + return response, metadata + def pre_get_notebook_runtime_template( self, request: notebook_service.GetNotebookRuntimeTemplateRequest, @@ -443,12 +636,38 @@ def post_get_notebook_runtime_template( ) -> notebook_runtime.NotebookRuntimeTemplate: """Post-rpc interceptor for get_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_get_notebook_runtime_template` interceptor runs + before the `post_get_notebook_runtime_template_with_metadata` interceptor. 
""" return response + def post_get_notebook_runtime_template_with_metadata( + self, + response: notebook_runtime.NotebookRuntimeTemplate, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_runtime.NotebookRuntimeTemplate, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_get_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_get_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_get_notebook_runtime_template_with_metadata` interceptor runs after the + `post_get_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_get_notebook_runtime_template` will be passed to + `post_get_notebook_runtime_template_with_metadata`. + """ + return response, metadata + def pre_list_notebook_execution_jobs( self, request: notebook_service.ListNotebookExecutionJobsRequest, @@ -469,12 +688,38 @@ def post_list_notebook_execution_jobs( ) -> notebook_service.ListNotebookExecutionJobsResponse: """Post-rpc interceptor for list_notebook_execution_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_notebook_execution_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_list_notebook_execution_jobs` interceptor runs + before the `post_list_notebook_execution_jobs_with_metadata` interceptor. 
""" return response + def post_list_notebook_execution_jobs_with_metadata( + self, + response: notebook_service.ListNotebookExecutionJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_service.ListNotebookExecutionJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_notebook_execution_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_list_notebook_execution_jobs_with_metadata` + interceptor in new development instead of the `post_list_notebook_execution_jobs` interceptor. + When both interceptors are used, this `post_list_notebook_execution_jobs_with_metadata` interceptor runs after the + `post_list_notebook_execution_jobs` interceptor. The (possibly modified) response returned by + `post_list_notebook_execution_jobs` will be passed to + `post_list_notebook_execution_jobs_with_metadata`. + """ + return response, metadata + def pre_list_notebook_runtimes( self, request: notebook_service.ListNotebookRuntimesRequest, @@ -495,12 +740,38 @@ def post_list_notebook_runtimes( ) -> notebook_service.ListNotebookRuntimesResponse: """Post-rpc interceptor for list_notebook_runtimes - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_notebook_runtimes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_list_notebook_runtimes` interceptor runs + before the `post_list_notebook_runtimes_with_metadata` interceptor. 
""" return response + def post_list_notebook_runtimes_with_metadata( + self, + response: notebook_service.ListNotebookRuntimesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_service.ListNotebookRuntimesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_notebook_runtimes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_list_notebook_runtimes_with_metadata` + interceptor in new development instead of the `post_list_notebook_runtimes` interceptor. + When both interceptors are used, this `post_list_notebook_runtimes_with_metadata` interceptor runs after the + `post_list_notebook_runtimes` interceptor. The (possibly modified) response returned by + `post_list_notebook_runtimes` will be passed to + `post_list_notebook_runtimes_with_metadata`. + """ + return response, metadata + def pre_list_notebook_runtime_templates( self, request: notebook_service.ListNotebookRuntimeTemplatesRequest, @@ -521,12 +792,38 @@ def post_list_notebook_runtime_templates( ) -> notebook_service.ListNotebookRuntimeTemplatesResponse: """Post-rpc interceptor for list_notebook_runtime_templates - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_notebook_runtime_templates_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_list_notebook_runtime_templates` interceptor runs + before the `post_list_notebook_runtime_templates_with_metadata` interceptor. 
""" return response + def post_list_notebook_runtime_templates_with_metadata( + self, + response: notebook_service.ListNotebookRuntimeTemplatesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_service.ListNotebookRuntimeTemplatesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_notebook_runtime_templates + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_list_notebook_runtime_templates_with_metadata` + interceptor in new development instead of the `post_list_notebook_runtime_templates` interceptor. + When both interceptors are used, this `post_list_notebook_runtime_templates_with_metadata` interceptor runs after the + `post_list_notebook_runtime_templates` interceptor. The (possibly modified) response returned by + `post_list_notebook_runtime_templates` will be passed to + `post_list_notebook_runtime_templates_with_metadata`. + """ + return response, metadata + def pre_start_notebook_runtime( self, request: notebook_service.StartNotebookRuntimeRequest, @@ -547,12 +844,35 @@ def post_start_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for start_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_start_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_start_notebook_runtime` interceptor runs + before the `post_start_notebook_runtime_with_metadata` interceptor. 
""" return response + def post_start_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for start_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_start_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_start_notebook_runtime` interceptor. + When both interceptors are used, this `post_start_notebook_runtime_with_metadata` interceptor runs after the + `post_start_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_start_notebook_runtime` will be passed to + `post_start_notebook_runtime_with_metadata`. + """ + return response, metadata + def pre_stop_notebook_runtime( self, request: notebook_service.StopNotebookRuntimeRequest, @@ -573,12 +893,35 @@ def post_stop_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for stop_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_stop_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_stop_notebook_runtime` interceptor runs + before the `post_stop_notebook_runtime_with_metadata` interceptor. 
""" return response + def post_stop_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for stop_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_stop_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_stop_notebook_runtime` interceptor. + When both interceptors are used, this `post_stop_notebook_runtime_with_metadata` interceptor runs after the + `post_stop_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_stop_notebook_runtime` will be passed to + `post_stop_notebook_runtime_with_metadata`. + """ + return response, metadata + def pre_update_notebook_runtime_template( self, request: notebook_service.UpdateNotebookRuntimeTemplateRequest, @@ -599,12 +942,38 @@ def post_update_notebook_runtime_template( ) -> notebook_runtime.NotebookRuntimeTemplate: """Post-rpc interceptor for update_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_update_notebook_runtime_template` interceptor runs + before the `post_update_notebook_runtime_template_with_metadata` interceptor. 
""" return response + def post_update_notebook_runtime_template_with_metadata( + self, + response: notebook_runtime.NotebookRuntimeTemplate, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_runtime.NotebookRuntimeTemplate, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for update_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_update_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_update_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_update_notebook_runtime_template_with_metadata` interceptor runs after the + `post_update_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_update_notebook_runtime_template` will be passed to + `post_update_notebook_runtime_template_with_metadata`. + """ + return response, metadata + def pre_upgrade_notebook_runtime( self, request: notebook_service.UpgradeNotebookRuntimeRequest, @@ -625,12 +994,35 @@ def post_upgrade_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for upgrade_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_upgrade_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_upgrade_notebook_runtime` interceptor runs + before the `post_upgrade_notebook_runtime_with_metadata` interceptor. 
""" return response + def post_upgrade_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for upgrade_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_upgrade_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_upgrade_notebook_runtime` interceptor. + When both interceptors are used, this `post_upgrade_notebook_runtime_with_metadata` interceptor runs after the + `post_upgrade_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_upgrade_notebook_runtime` will be passed to + `post_upgrade_notebook_runtime_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -921,9 +1313,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1134,6 +1527,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1270,6 +1667,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1278,6 +1679,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1318,6 +1727,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1480,6 +1893,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, 
+ { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1508,6 +1925,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1620,6 +2041,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1628,6 +2053,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1688,6 +2121,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1850,6 +2287,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1890,6 +2331,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2002,6 +2447,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2010,6 +2459,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2070,6 +2527,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2228,6 +2689,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2268,6 +2733,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2380,6 +2849,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2400,6 +2877,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2448,6 +2929,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2614,6 +3099,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2646,6 +3135,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2754,6 +3247,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2762,6 +3259,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2822,6 +3327,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2930,7 +3439,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2972,6 +3481,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_assign_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_assign_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3085,7 +3598,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3127,6 +3640,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_notebook_execution_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_create_notebook_execution_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3240,7 +3759,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3280,6 +3799,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + 
self._interceptor.post_create_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3388,7 +3913,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3429,6 +3954,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_notebook_execution_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_delete_notebook_execution_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3536,7 +4067,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3577,6 +4108,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3685,7 +4220,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3724,6 +4259,12 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) 
for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_delete_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3874,6 +4415,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_notebook_execution_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_notebook_execution_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3956,7 +4501,10 @@ def __call__( A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary - basis with lifetime limited to 24 hours. + basis with lifetime. Default runtimes + have a lifetime of 18 hours, while + custom runtimes last for 6 months from + their creation or last upgrade. 
""" @@ -4025,6 +4573,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4180,6 +4732,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_get_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4332,6 +4890,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_notebook_execution_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_notebook_execution_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4483,6 +5045,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_notebook_runtimes(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_notebook_runtimes_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4633,6 +5199,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_notebook_runtime_templates(resp) + response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] + resp, _ = ( + self._interceptor.post_list_notebook_runtime_templates_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4749,7 +5321,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4789,6 +5361,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_start_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_start_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4901,7 +5477,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -4941,6 +5517,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_stop_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_stop_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5098,6 +5678,12 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + self._interceptor.post_update_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( 
logging.DEBUG ): # pragma: NO COVER @@ -5212,7 +5798,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -5254,6 +5840,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_upgrade_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_upgrade_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -5486,7 +6076,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -5628,7 +6217,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -5770,7 +6358,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -5913,7 +6500,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -6061,7 +6647,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -6205,7 +6790,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -6321,7 +6905,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -6437,7 +7020,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -6579,7 +7161,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -6721,7 +7302,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py index bb38b8a807..367d984740 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -83,6 +83,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncNotebookServiceRestInterceptor: """Asynchronous Interceptor for NotebookService. @@ -253,12 +256,35 @@ async def post_assign_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for assign_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_assign_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_assign_notebook_runtime` interceptor runs + before the `post_assign_notebook_runtime_with_metadata` interceptor. """ return response + async def post_assign_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for assign_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_assign_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_assign_notebook_runtime` interceptor. 
+ When both interceptors are used, this `post_assign_notebook_runtime_with_metadata` interceptor runs after the + `post_assign_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_assign_notebook_runtime` will be passed to + `post_assign_notebook_runtime_with_metadata`. + """ + return response, metadata + async def pre_create_notebook_execution_job( self, request: notebook_service.CreateNotebookExecutionJobRequest, @@ -279,12 +305,35 @@ async def post_create_notebook_execution_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_notebook_execution_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_notebook_execution_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_create_notebook_execution_job` interceptor runs + before the `post_create_notebook_execution_job_with_metadata` interceptor. """ return response + async def post_create_notebook_execution_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_notebook_execution_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_create_notebook_execution_job_with_metadata` + interceptor in new development instead of the `post_create_notebook_execution_job` interceptor. + When both interceptors are used, this `post_create_notebook_execution_job_with_metadata` interceptor runs after the + `post_create_notebook_execution_job` interceptor. 
The (possibly modified) response returned by + `post_create_notebook_execution_job` will be passed to + `post_create_notebook_execution_job_with_metadata`. + """ + return response, metadata + async def pre_create_notebook_runtime_template( self, request: notebook_service.CreateNotebookRuntimeTemplateRequest, @@ -305,12 +354,35 @@ async def post_create_notebook_runtime_template( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_create_notebook_runtime_template` interceptor runs + before the `post_create_notebook_runtime_template_with_metadata` interceptor. """ return response + async def post_create_notebook_runtime_template_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_create_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_create_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_create_notebook_runtime_template_with_metadata` interceptor runs after the + `post_create_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_create_notebook_runtime_template` will be passed to + `post_create_notebook_runtime_template_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_notebook_execution_job( self, request: notebook_service.DeleteNotebookExecutionJobRequest, @@ -331,12 +403,35 @@ async def post_delete_notebook_execution_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_notebook_execution_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_notebook_execution_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_delete_notebook_execution_job` interceptor runs + before the `post_delete_notebook_execution_job_with_metadata` interceptor. """ return response + async def post_delete_notebook_execution_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_notebook_execution_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_delete_notebook_execution_job_with_metadata` + interceptor in new development instead of the `post_delete_notebook_execution_job` interceptor. + When both interceptors are used, this `post_delete_notebook_execution_job_with_metadata` interceptor runs after the + `post_delete_notebook_execution_job` interceptor. The (possibly modified) response returned by + `post_delete_notebook_execution_job` will be passed to + `post_delete_notebook_execution_job_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_notebook_runtime( self, request: notebook_service.DeleteNotebookRuntimeRequest, @@ -357,12 +452,35 @@ async def post_delete_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_delete_notebook_runtime` interceptor runs + before the `post_delete_notebook_runtime_with_metadata` interceptor. """ return response + async def post_delete_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_delete_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_delete_notebook_runtime` interceptor. + When both interceptors are used, this `post_delete_notebook_runtime_with_metadata` interceptor runs after the + `post_delete_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_delete_notebook_runtime` will be passed to + `post_delete_notebook_runtime_with_metadata`. 
+ """ + return response, metadata + async def pre_delete_notebook_runtime_template( self, request: notebook_service.DeleteNotebookRuntimeTemplateRequest, @@ -383,12 +501,35 @@ async def post_delete_notebook_runtime_template( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_delete_notebook_runtime_template` interceptor runs + before the `post_delete_notebook_runtime_template_with_metadata` interceptor. """ return response + async def post_delete_notebook_runtime_template_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_delete_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_delete_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_delete_notebook_runtime_template_with_metadata` interceptor runs after the + `post_delete_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_delete_notebook_runtime_template` will be passed to + `post_delete_notebook_runtime_template_with_metadata`. 
+ """ + return response, metadata + async def pre_get_notebook_execution_job( self, request: notebook_service.GetNotebookExecutionJobRequest, @@ -409,12 +550,38 @@ async def post_get_notebook_execution_job( ) -> notebook_execution_job.NotebookExecutionJob: """Post-rpc interceptor for get_notebook_execution_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_notebook_execution_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_get_notebook_execution_job` interceptor runs + before the `post_get_notebook_execution_job_with_metadata` interceptor. """ return response + async def post_get_notebook_execution_job_with_metadata( + self, + response: notebook_execution_job.NotebookExecutionJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_execution_job.NotebookExecutionJob, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_notebook_execution_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_get_notebook_execution_job_with_metadata` + interceptor in new development instead of the `post_get_notebook_execution_job` interceptor. + When both interceptors are used, this `post_get_notebook_execution_job_with_metadata` interceptor runs after the + `post_get_notebook_execution_job` interceptor. The (possibly modified) response returned by + `post_get_notebook_execution_job` will be passed to + `post_get_notebook_execution_job_with_metadata`. 
+ """ + return response, metadata + async def pre_get_notebook_runtime( self, request: notebook_service.GetNotebookRuntimeRequest, @@ -435,12 +602,37 @@ async def post_get_notebook_runtime( ) -> notebook_runtime.NotebookRuntime: """Post-rpc interceptor for get_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_get_notebook_runtime` interceptor runs + before the `post_get_notebook_runtime_with_metadata` interceptor. """ return response + async def post_get_notebook_runtime_with_metadata( + self, + response: notebook_runtime.NotebookRuntime, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_runtime.NotebookRuntime, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_get_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_get_notebook_runtime` interceptor. + When both interceptors are used, this `post_get_notebook_runtime_with_metadata` interceptor runs after the + `post_get_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_get_notebook_runtime` will be passed to + `post_get_notebook_runtime_with_metadata`. 
+ """ + return response, metadata + async def pre_get_notebook_runtime_template( self, request: notebook_service.GetNotebookRuntimeTemplateRequest, @@ -461,12 +653,38 @@ async def post_get_notebook_runtime_template( ) -> notebook_runtime.NotebookRuntimeTemplate: """Post-rpc interceptor for get_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_get_notebook_runtime_template` interceptor runs + before the `post_get_notebook_runtime_template_with_metadata` interceptor. """ return response + async def post_get_notebook_runtime_template_with_metadata( + self, + response: notebook_runtime.NotebookRuntimeTemplate, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_runtime.NotebookRuntimeTemplate, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_get_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_get_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_get_notebook_runtime_template_with_metadata` interceptor runs after the + `post_get_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_get_notebook_runtime_template` will be passed to + `post_get_notebook_runtime_template_with_metadata`. 
+ """ + return response, metadata + async def pre_list_notebook_execution_jobs( self, request: notebook_service.ListNotebookExecutionJobsRequest, @@ -487,12 +705,38 @@ async def post_list_notebook_execution_jobs( ) -> notebook_service.ListNotebookExecutionJobsResponse: """Post-rpc interceptor for list_notebook_execution_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_notebook_execution_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_list_notebook_execution_jobs` interceptor runs + before the `post_list_notebook_execution_jobs_with_metadata` interceptor. """ return response + async def post_list_notebook_execution_jobs_with_metadata( + self, + response: notebook_service.ListNotebookExecutionJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_service.ListNotebookExecutionJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_notebook_execution_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_list_notebook_execution_jobs_with_metadata` + interceptor in new development instead of the `post_list_notebook_execution_jobs` interceptor. + When both interceptors are used, this `post_list_notebook_execution_jobs_with_metadata` interceptor runs after the + `post_list_notebook_execution_jobs` interceptor. The (possibly modified) response returned by + `post_list_notebook_execution_jobs` will be passed to + `post_list_notebook_execution_jobs_with_metadata`. 
+ """ + return response, metadata + async def pre_list_notebook_runtimes( self, request: notebook_service.ListNotebookRuntimesRequest, @@ -513,12 +757,38 @@ async def post_list_notebook_runtimes( ) -> notebook_service.ListNotebookRuntimesResponse: """Post-rpc interceptor for list_notebook_runtimes - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_notebook_runtimes_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_list_notebook_runtimes` interceptor runs + before the `post_list_notebook_runtimes_with_metadata` interceptor. """ return response + async def post_list_notebook_runtimes_with_metadata( + self, + response: notebook_service.ListNotebookRuntimesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_service.ListNotebookRuntimesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_notebook_runtimes + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_list_notebook_runtimes_with_metadata` + interceptor in new development instead of the `post_list_notebook_runtimes` interceptor. + When both interceptors are used, this `post_list_notebook_runtimes_with_metadata` interceptor runs after the + `post_list_notebook_runtimes` interceptor. The (possibly modified) response returned by + `post_list_notebook_runtimes` will be passed to + `post_list_notebook_runtimes_with_metadata`. 
+ """ + return response, metadata + async def pre_list_notebook_runtime_templates( self, request: notebook_service.ListNotebookRuntimeTemplatesRequest, @@ -539,12 +809,38 @@ async def post_list_notebook_runtime_templates( ) -> notebook_service.ListNotebookRuntimeTemplatesResponse: """Post-rpc interceptor for list_notebook_runtime_templates - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_notebook_runtime_templates_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_list_notebook_runtime_templates` interceptor runs + before the `post_list_notebook_runtime_templates_with_metadata` interceptor. """ return response + async def post_list_notebook_runtime_templates_with_metadata( + self, + response: notebook_service.ListNotebookRuntimeTemplatesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_service.ListNotebookRuntimeTemplatesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_notebook_runtime_templates + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_list_notebook_runtime_templates_with_metadata` + interceptor in new development instead of the `post_list_notebook_runtime_templates` interceptor. + When both interceptors are used, this `post_list_notebook_runtime_templates_with_metadata` interceptor runs after the + `post_list_notebook_runtime_templates` interceptor. The (possibly modified) response returned by + `post_list_notebook_runtime_templates` will be passed to + `post_list_notebook_runtime_templates_with_metadata`. 
+ """ + return response, metadata + async def pre_start_notebook_runtime( self, request: notebook_service.StartNotebookRuntimeRequest, @@ -565,12 +861,35 @@ async def post_start_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for start_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_start_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_start_notebook_runtime` interceptor runs + before the `post_start_notebook_runtime_with_metadata` interceptor. """ return response + async def post_start_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for start_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_start_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_start_notebook_runtime` interceptor. + When both interceptors are used, this `post_start_notebook_runtime_with_metadata` interceptor runs after the + `post_start_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_start_notebook_runtime` will be passed to + `post_start_notebook_runtime_with_metadata`. 
+ """ + return response, metadata + async def pre_stop_notebook_runtime( self, request: notebook_service.StopNotebookRuntimeRequest, @@ -591,12 +910,35 @@ async def post_stop_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for stop_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_stop_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_stop_notebook_runtime` interceptor runs + before the `post_stop_notebook_runtime_with_metadata` interceptor. """ return response + async def post_stop_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for stop_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_stop_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_stop_notebook_runtime` interceptor. + When both interceptors are used, this `post_stop_notebook_runtime_with_metadata` interceptor runs after the + `post_stop_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_stop_notebook_runtime` will be passed to + `post_stop_notebook_runtime_with_metadata`. 
+ """ + return response, metadata + async def pre_update_notebook_runtime_template( self, request: notebook_service.UpdateNotebookRuntimeTemplateRequest, @@ -617,12 +959,38 @@ async def post_update_notebook_runtime_template( ) -> notebook_runtime.NotebookRuntimeTemplate: """Post-rpc interceptor for update_notebook_runtime_template - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_notebook_runtime_template_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_update_notebook_runtime_template` interceptor runs + before the `post_update_notebook_runtime_template_with_metadata` interceptor. """ return response + async def post_update_notebook_runtime_template_with_metadata( + self, + response: notebook_runtime.NotebookRuntimeTemplate, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + notebook_runtime.NotebookRuntimeTemplate, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for update_notebook_runtime_template + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_update_notebook_runtime_template_with_metadata` + interceptor in new development instead of the `post_update_notebook_runtime_template` interceptor. + When both interceptors are used, this `post_update_notebook_runtime_template_with_metadata` interceptor runs after the + `post_update_notebook_runtime_template` interceptor. The (possibly modified) response returned by + `post_update_notebook_runtime_template` will be passed to + `post_update_notebook_runtime_template_with_metadata`. 
+ """ + return response, metadata + async def pre_upgrade_notebook_runtime( self, request: notebook_service.UpgradeNotebookRuntimeRequest, @@ -643,12 +1011,35 @@ async def post_upgrade_notebook_runtime( ) -> operations_pb2.Operation: """Post-rpc interceptor for upgrade_notebook_runtime - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_upgrade_notebook_runtime_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the NotebookService server but before - it is returned to user code. + it is returned to user code. This `post_upgrade_notebook_runtime` interceptor runs + before the `post_upgrade_notebook_runtime_with_metadata` interceptor. """ return response + async def post_upgrade_notebook_runtime_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for upgrade_notebook_runtime + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the NotebookService server but before it is returned to user code. + + We recommend only using this `post_upgrade_notebook_runtime_with_metadata` + interceptor in new development instead of the `post_upgrade_notebook_runtime` interceptor. + When both interceptors are used, this `post_upgrade_notebook_runtime_with_metadata` interceptor runs after the + `post_upgrade_notebook_runtime` interceptor. The (possibly modified) response returned by + `post_upgrade_notebook_runtime` will be passed to + `post_upgrade_notebook_runtime_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -958,9 +1349,9 @@ def __init__( self._interceptor = interceptor or AsyncNotebookServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1191,7 +1582,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1238,6 +1629,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_assign_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_assign_notebook_runtime_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1328,11 +1725,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseCreateNotebookExecutionJob._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_notebook_execution_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_notebook_execution_job( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseCreateNotebookExecutionJob._get_transcoded_request( http_options, request @@ -1355,7 +1751,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = 
type(request).to_json(request) except: request_payload = None http_request = { @@ -1402,6 +1798,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_notebook_execution_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_notebook_execution_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1494,11 +1896,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseCreateNotebookRuntimeTemplate._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_create_notebook_runtime_template( - request, metadata + request, metadata = ( + await self._interceptor.pre_create_notebook_runtime_template( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseCreateNotebookRuntimeTemplate._get_transcoded_request( http_options, request @@ -1521,7 +1922,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1568,6 +1969,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1657,11 +2064,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseDeleteNotebookExecutionJob._get_http_options() ) - ( - request, - metadata, - ) = await 
self._interceptor.pre_delete_notebook_execution_job( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_notebook_execution_job( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseDeleteNotebookExecutionJob._get_transcoded_request( http_options, request @@ -1680,7 +2086,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1726,6 +2132,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_notebook_execution_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_notebook_execution_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1834,7 +2246,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1880,6 +2292,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_notebook_runtime_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1971,11 +2389,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseDeleteNotebookRuntimeTemplate._get_http_options() ) - ( - request, - metadata, - ) = await 
self._interceptor.pre_delete_notebook_runtime_template( - request, metadata + request, metadata = ( + await self._interceptor.pre_delete_notebook_runtime_template( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseDeleteNotebookRuntimeTemplate._get_transcoded_request( http_options, request @@ -1994,7 +2411,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2040,6 +2457,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2194,6 +2617,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_notebook_execution_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_notebook_execution_job_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2277,7 +2706,10 @@ async def __call__( A runtime is a virtual machine allocated to a particular user for a particular Notebook file on temporary - basis with lifetime limited to 24 hours. + basis with lifetime. Default runtimes + have a lifetime of 18 hours, while + custom runtimes last for 6 months from + their creation or last upgrade. 
""" @@ -2351,6 +2783,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2444,11 +2880,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseGetNotebookRuntimeTemplate._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_get_notebook_runtime_template( - request, metadata + request, metadata = ( + await self._interceptor.pre_get_notebook_runtime_template( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseGetNotebookRuntimeTemplate._get_transcoded_request( http_options, request @@ -2513,6 +2948,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2603,11 +3044,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseListNotebookExecutionJobs._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_notebook_execution_jobs( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_notebook_execution_jobs( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseListNotebookExecutionJobs._get_transcoded_request( http_options, request @@ -2672,6 +3112,12 @@ async def __call__( content 
= await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_notebook_execution_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_notebook_execution_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2829,6 +3275,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_notebook_runtimes(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_notebook_runtimes_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2921,11 +3371,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseListNotebookRuntimeTemplates._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_list_notebook_runtime_templates( - request, metadata + request, metadata = ( + await self._interceptor.pre_list_notebook_runtime_templates( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseListNotebookRuntimeTemplates._get_transcoded_request( http_options, request @@ -2990,6 +3439,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_notebook_runtime_templates(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_notebook_runtime_templates_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3107,7 +3562,7 @@ async def __call__( ) method = 
transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3154,6 +3609,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_start_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_start_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3267,7 +3726,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3314,6 +3773,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_stop_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_stop_notebook_runtime_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3408,11 +3871,10 @@ async def __call__( _BaseNotebookServiceRestTransport._BaseUpdateNotebookRuntimeTemplate._get_http_options() ) - ( - request, - metadata, - ) = await self._interceptor.pre_update_notebook_runtime_template( - request, metadata + request, metadata = ( + await self._interceptor.pre_update_notebook_runtime_template( + request, metadata + ) ) transcoded_request = _BaseNotebookServiceRestTransport._BaseUpdateNotebookRuntimeTemplate._get_transcoded_request( http_options, request @@ -3482,6 +3944,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, 
ignore_unknown_fields=True) resp = await self._interceptor.post_update_notebook_runtime_template(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_notebook_runtime_template_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3597,7 +4065,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3644,6 +4112,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_upgrade_notebook_runtime(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_upgrade_notebook_runtime_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3839,6 +4313,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -3975,6 +4453,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -3983,6 +4465,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": 
"post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4023,6 +4513,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -4185,6 +4679,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -4213,6 +4711,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -4325,6 +4827,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", 
@@ -4333,6 +4839,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4393,6 +4907,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -4555,6 +5073,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -4595,6 +5117,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -4707,6 +5233,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -4715,6 +5245,14 @@ 
def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4775,6 +5313,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -4933,6 +5475,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -4973,6 +5519,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -5085,6 +5635,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -5105,6 +5663,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -5153,6 +5715,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -5319,6 +5885,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -5351,6 +5921,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -5459,6 +6033,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -5467,6 +6045,14 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -5527,6 +6113,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -5722,7 +6312,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -5875,7 +6464,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -6028,7 +6616,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -6182,7 +6769,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -6340,7 +6926,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. 
Args: @@ -6491,7 +7076,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -6616,7 +7200,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -6741,7 +7324,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -6894,7 +7476,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -7047,7 +7628,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py index 991bd101ae..dfc8c5b32d 100644 --- a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -1048,6 +1048,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -1134,6 +1138,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1212,6 +1221,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1393,6 +1406,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1529,6 +1546,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1537,6 +1558,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1577,6 +1606,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1758,6 +1791,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1786,6 +1823,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1898,6 +1939,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1906,6 +1951,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1966,6 +2019,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2147,6 +2204,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2187,6 +2248,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2299,6 +2364,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2307,6 +2376,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2367,6 +2444,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2544,6 +2625,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2584,6 +2669,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2696,6 +2785,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2716,6 +2813,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2764,6 +2865,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2949,6 +3054,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2981,6 +3090,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3089,6 +3202,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3097,6 +3214,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3157,6 +3282,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py index 2c9d163d97..4101a7ab7e 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py index a2d7234f0f..f819872dff 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -54,14 +53,17 @@ persistent_resource as gca_persistent_resource, ) from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.aiplatform_v1.types import service_networking from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # 
type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PersistentResourceServiceGrpcAsyncIOTransport from .client import PersistentResourceServiceClient @@ -96,6 +98,12 @@ class PersistentResourceServiceAsyncClient: parse_network_path = staticmethod( PersistentResourceServiceClient.parse_network_path ) + network_attachment_path = staticmethod( + PersistentResourceServiceClient.network_attachment_path + ) + parse_network_attachment_path = staticmethod( + PersistentResourceServiceClient.parse_network_attachment_path + ) persistent_resource_path = staticmethod( PersistentResourceServiceClient.persistent_resource_path ) @@ -150,7 +158,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: PersistentResourceServiceAsyncClient: The constructed client. 
""" - return PersistentResourceServiceClient.from_service_account_info.__func__(PersistentResourceServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + PersistentResourceServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(PersistentResourceServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -166,7 +177,12 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: PersistentResourceServiceAsyncClient: The constructed client. """ - return PersistentResourceServiceClient.from_service_account_file.__func__(PersistentResourceServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + PersistentResourceServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func( + PersistentResourceServiceAsyncClient, filename, *args, **kwargs + ) from_service_account_json = from_service_account_file @@ -311,21 +327,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": 
f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", + "credentialsType": None, + } + ), ) async def create_persistent_resource( @@ -427,8 +445,9 @@ async def sample_create_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, persistent_resource, persistent_resource_id] + flattened_params = [parent, persistent_resource, persistent_resource_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -559,7 +578,10 @@ async def sample_get_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -676,7 +698,10 @@ async def sample_list_persistent_resources(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -816,7 +841,10 @@ async def sample_delete_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -959,7 +987,10 @@ async def sample_update_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([persistent_resource, update_mask]) + flattened_params = [persistent_resource, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1094,7 +1125,10 @@ async def sample_reboot_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1857,5 +1891,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("PersistentResourceServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py index ab694c7173..7e3bd46feb 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import operation as gca_operation @@ -68,14 +69,17 @@ persistent_resource as gca_persistent_resource, ) from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.aiplatform_v1.types import service_networking from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.field_mask_pb2 as field_mask_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PersistentResourceServiceTransport, 
DEFAULT_CLIENT_INFO from .transports.grpc import PersistentResourceServiceGrpcTransport from .transports.grpc_asyncio import PersistentResourceServiceGrpcAsyncIOTransport @@ -105,9 +109,9 @@ class PersistentResourceServiceClientMeta(type): _transport_registry["grpc_asyncio"] = PersistentResourceServiceGrpcAsyncIOTransport _transport_registry["rest"] = PersistentResourceServiceRestTransport if HAS_ASYNC_REST_DEPENDENCIES: # pragma: NO COVER - _transport_registry[ - "rest_asyncio" - ] = AsyncPersistentResourceServiceRestTransport + _transport_registry["rest_asyncio"] = ( + AsyncPersistentResourceServiceRestTransport + ) def get_transport_class( cls, @@ -179,6 +183,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -245,6 +277,28 @@ def parse_network_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_attachment_path( + project: str, + region: str, + networkattachment: str, + ) -> str: + """Returns a fully-qualified network_attachment string.""" + return "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format( + project=project, + region=region, + networkattachment=networkattachment, + ) + + @staticmethod + def parse_network_attachment_path(path: str) -> Dict[str, str]: + """Parses a network_attachment path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/regions/(?P.+?)/networkAttachments/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def persistent_resource_path( project: str, @@ -407,12 +461,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = PersistentResourceServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment 
variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -420,7 +470,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -452,20 +502,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = PersistentResourceServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -560,6 +604,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. 
+ + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -652,11 +723,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = PersistentResourceServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + PersistentResourceServiceClient._read_environment_variables() + ) self._client_cert_source = ( PersistentResourceServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert @@ -771,21 +840,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.PersistentResourceServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", - "credentialsType": None, - }, + extra=( + { + 
"serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", + "credentialsType": None, + } + ), ) def create_persistent_resource( @@ -887,8 +960,9 @@ def sample_create_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [parent, persistent_resource, persistent_resource_id] + flattened_params = [parent, persistent_resource, persistent_resource_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1018,7 +1092,10 @@ def sample_get_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1132,7 +1209,10 @@ def sample_list_persistent_resources(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1271,7 +1351,10 @@ def sample_delete_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1413,7 +1496,10 @@ def sample_update_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([persistent_resource, update_mask]) + flattened_params = [persistent_resource, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1547,7 +1633,10 @@ def sample_reboot_persistent_resource(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1658,16 +1747,20 @@ def list_operations( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -1713,16 +1806,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -1885,16 +1982,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -2006,16 +2107,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2128,16 +2233,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2188,16 +2297,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -2243,16 +2356,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -2298,21 +2415,27 @@ def list_locations( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("PersistentResourceServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py index 40738ac29c..2473690b73 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py index 80e1eea36c..9e0e3549e6 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py index 2c7a8d6727..7d6abdf7ed 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import persistent_resource from google.cloud.aiplatform_v1.types import persistent_resource_service @@ -38,6 +39,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class PersistentResourceServiceTransport(abc.ABC): """Abstract transport class for PersistentResourceService.""" @@ -69,9 +73,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -84,8 +89,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -100,11 +103,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. if hasattr(credentials, "with_gdch_audience"): @@ -315,13 +323,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py index 59cfc26fd0..41e071ddb2 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the 
"License"); # you may not use this file except in compliance with the License. @@ -76,12 +76,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.PersistentResourceService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -159,9 +158,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -295,9 +295,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -367,12 +368,12 @@ def create_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_persistent_resource" not in self._stubs: - self._stubs[ - "create_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/CreatePersistentResource", - request_serializer=persistent_resource_service.CreatePersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/CreatePersistentResource", + request_serializer=persistent_resource_service.CreatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_persistent_resource"] @@ -456,12 +457,12 @@ def delete_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_persistent_resource" not in self._stubs: - self._stubs[ - "delete_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/DeletePersistentResource", - request_serializer=persistent_resource_service.DeletePersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/DeletePersistentResource", + request_serializer=persistent_resource_service.DeletePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_persistent_resource"] @@ -487,12 +488,12 @@ def update_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_persistent_resource" not in self._stubs: - self._stubs[ - "update_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/UpdatePersistentResource", - request_serializer=persistent_resource_service.UpdatePersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/UpdatePersistentResource", + request_serializer=persistent_resource_service.UpdatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_persistent_resource"] @@ -518,12 +519,12 @@ def reboot_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "reboot_persistent_resource" not in self._stubs: - self._stubs[ - "reboot_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/RebootPersistentResource", - request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["reboot_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/RebootPersistentResource", + request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["reboot_persistent_resource"] diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py index 1bcf559377..0b33d02173 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -155,8 +155,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -207,9 +208,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -374,12 +376,12 @@ def create_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_persistent_resource" not in self._stubs: - self._stubs[ - "create_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/CreatePersistentResource", - request_serializer=persistent_resource_service.CreatePersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["create_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/CreatePersistentResource", + request_serializer=persistent_resource_service.CreatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["create_persistent_resource"] @@ -463,12 +465,12 @@ def delete_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_persistent_resource" not in self._stubs: - self._stubs[ - "delete_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/DeletePersistentResource", - request_serializer=persistent_resource_service.DeletePersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["delete_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/DeletePersistentResource", + request_serializer=persistent_resource_service.DeletePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["delete_persistent_resource"] @@ -494,12 +496,12 @@ def update_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_persistent_resource" not in self._stubs: - self._stubs[ - "update_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/UpdatePersistentResource", - request_serializer=persistent_resource_service.UpdatePersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["update_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/UpdatePersistentResource", + request_serializer=persistent_resource_service.UpdatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["update_persistent_resource"] @@ -525,12 +527,12 @@ def reboot_persistent_resource( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "reboot_persistent_resource" not in self._stubs: - self._stubs[ - "reboot_persistent_resource" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PersistentResourceService/RebootPersistentResource", - request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["reboot_persistent_resource"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/RebootPersistentResource", + request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["reboot_persistent_resource"] diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py index 0dd633e1e3..0790b31798 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -64,6 +65,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class PersistentResourceServiceRestInterceptor: """Interceptor for PersistentResourceService. 
@@ -154,12 +158,35 @@ def post_create_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_create_persistent_resource` interceptor runs + before the `post_create_persistent_resource_with_metadata` interceptor. """ return response + def post_create_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_create_persistent_resource_with_metadata` + interceptor in new development instead of the `post_create_persistent_resource` interceptor. + When both interceptors are used, this `post_create_persistent_resource_with_metadata` interceptor runs after the + `post_create_persistent_resource` interceptor. The (possibly modified) response returned by + `post_create_persistent_resource` will be passed to + `post_create_persistent_resource_with_metadata`. + """ + return response, metadata + def pre_delete_persistent_resource( self, request: persistent_resource_service.DeletePersistentResourceRequest, @@ -180,12 +207,35 @@ def post_delete_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_delete_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_delete_persistent_resource` interceptor runs + before the `post_delete_persistent_resource_with_metadata` interceptor. """ return response + def post_delete_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_delete_persistent_resource_with_metadata` + interceptor in new development instead of the `post_delete_persistent_resource` interceptor. + When both interceptors are used, this `post_delete_persistent_resource_with_metadata` interceptor runs after the + `post_delete_persistent_resource` interceptor. The (possibly modified) response returned by + `post_delete_persistent_resource` will be passed to + `post_delete_persistent_resource_with_metadata`. + """ + return response, metadata + def pre_get_persistent_resource( self, request: persistent_resource_service.GetPersistentResourceRequest, @@ -206,12 +256,37 @@ def post_get_persistent_resource( ) -> persistent_resource.PersistentResource: """Post-rpc interceptor for get_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. 
+ it is returned to user code. This `post_get_persistent_resource` interceptor runs + before the `post_get_persistent_resource_with_metadata` interceptor. """ return response + def post_get_persistent_resource_with_metadata( + self, + response: persistent_resource.PersistentResource, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + persistent_resource.PersistentResource, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_get_persistent_resource_with_metadata` + interceptor in new development instead of the `post_get_persistent_resource` interceptor. + When both interceptors are used, this `post_get_persistent_resource_with_metadata` interceptor runs after the + `post_get_persistent_resource` interceptor. The (possibly modified) response returned by + `post_get_persistent_resource` will be passed to + `post_get_persistent_resource_with_metadata`. + """ + return response, metadata + def pre_list_persistent_resources( self, request: persistent_resource_service.ListPersistentResourcesRequest, @@ -232,12 +307,38 @@ def post_list_persistent_resources( ) -> persistent_resource_service.ListPersistentResourcesResponse: """Post-rpc interceptor for list_persistent_resources - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_persistent_resources_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_list_persistent_resources` interceptor runs + before the `post_list_persistent_resources_with_metadata` interceptor. 
""" return response + def post_list_persistent_resources_with_metadata( + self, + response: persistent_resource_service.ListPersistentResourcesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + persistent_resource_service.ListPersistentResourcesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_persistent_resources + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_list_persistent_resources_with_metadata` + interceptor in new development instead of the `post_list_persistent_resources` interceptor. + When both interceptors are used, this `post_list_persistent_resources_with_metadata` interceptor runs after the + `post_list_persistent_resources` interceptor. The (possibly modified) response returned by + `post_list_persistent_resources` will be passed to + `post_list_persistent_resources_with_metadata`. + """ + return response, metadata + def pre_reboot_persistent_resource( self, request: persistent_resource_service.RebootPersistentResourceRequest, @@ -258,12 +359,35 @@ def post_reboot_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for reboot_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_reboot_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_reboot_persistent_resource` interceptor runs + before the `post_reboot_persistent_resource_with_metadata` interceptor. 
""" return response + def post_reboot_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for reboot_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_reboot_persistent_resource_with_metadata` + interceptor in new development instead of the `post_reboot_persistent_resource` interceptor. + When both interceptors are used, this `post_reboot_persistent_resource_with_metadata` interceptor runs after the + `post_reboot_persistent_resource` interceptor. The (possibly modified) response returned by + `post_reboot_persistent_resource` will be passed to + `post_reboot_persistent_resource_with_metadata`. + """ + return response, metadata + def pre_update_persistent_resource( self, request: persistent_resource_service.UpdatePersistentResourceRequest, @@ -284,12 +408,35 @@ def post_update_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_update_persistent_resource` interceptor runs + before the `post_update_persistent_resource_with_metadata` interceptor. 
""" return response + def post_update_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_update_persistent_resource_with_metadata` + interceptor in new development instead of the `post_update_persistent_resource` interceptor. + When both interceptors are used, this `post_update_persistent_resource_with_metadata` interceptor runs after the + `post_update_persistent_resource` interceptor. The (possibly modified) response returned by + `post_update_persistent_resource` will be passed to + `post_update_persistent_resource_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -582,9 +729,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -795,6 +943,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -931,6 +1083,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -939,6 +1095,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -979,6 +1143,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1141,6 +1309,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + 
"method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1169,6 +1341,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1281,6 +1457,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1289,6 +1469,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1349,6 +1537,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1511,6 +1703,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1551,6 +1747,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1663,6 +1863,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1671,6 +1875,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1731,6 +1943,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -1889,6 +2105,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -1929,6 +2149,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2041,6 +2265,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2061,6 +2293,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2109,6 +2345,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2275,6 +2515,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2307,6 +2551,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2415,6 +2663,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2423,6 +2675,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2483,6 +2743,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2594,7 +2858,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2634,6 +2898,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_persistent_resource_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2744,7 +3012,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2783,6 +3051,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_persistent_resource_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2933,6 +3205,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_persistent_resource_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3086,6 +3362,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = 
self._interceptor.post_list_persistent_resources(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_persistent_resources_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3203,7 +3483,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3243,6 +3523,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_reboot_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_reboot_persistent_resource_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3358,7 +3642,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3398,6 +3682,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_update_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_persistent_resource_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3529,7 +3817,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. 
Args: @@ -3674,7 +3961,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3819,7 +4105,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -3965,7 +4250,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -4115,7 +4399,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4260,7 +4543,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4379,7 +4661,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4498,7 +4779,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4643,7 +4923,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4788,7 +5067,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py index 2ad39d5e12..76f51141ea 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -82,6 +82,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncPersistentResourceServiceRestInterceptor: """Asynchronous Interceptor for PersistentResourceService. @@ -172,12 +175,35 @@ async def post_create_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_create_persistent_resource` interceptor runs + before the `post_create_persistent_resource_with_metadata` interceptor. 
""" return response + async def post_create_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_create_persistent_resource_with_metadata` + interceptor in new development instead of the `post_create_persistent_resource` interceptor. + When both interceptors are used, this `post_create_persistent_resource_with_metadata` interceptor runs after the + `post_create_persistent_resource` interceptor. The (possibly modified) response returned by + `post_create_persistent_resource` will be passed to + `post_create_persistent_resource_with_metadata`. + """ + return response, metadata + async def pre_delete_persistent_resource( self, request: persistent_resource_service.DeletePersistentResourceRequest, @@ -198,12 +224,35 @@ async def post_delete_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_delete_persistent_resource` interceptor runs + before the `post_delete_persistent_resource_with_metadata` interceptor. 
""" return response + async def post_delete_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_delete_persistent_resource_with_metadata` + interceptor in new development instead of the `post_delete_persistent_resource` interceptor. + When both interceptors are used, this `post_delete_persistent_resource_with_metadata` interceptor runs after the + `post_delete_persistent_resource` interceptor. The (possibly modified) response returned by + `post_delete_persistent_resource` will be passed to + `post_delete_persistent_resource_with_metadata`. + """ + return response, metadata + async def pre_get_persistent_resource( self, request: persistent_resource_service.GetPersistentResourceRequest, @@ -224,12 +273,37 @@ async def post_get_persistent_resource( ) -> persistent_resource.PersistentResource: """Post-rpc interceptor for get_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_get_persistent_resource` interceptor runs + before the `post_get_persistent_resource_with_metadata` interceptor. 
""" return response + async def post_get_persistent_resource_with_metadata( + self, + response: persistent_resource.PersistentResource, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + persistent_resource.PersistentResource, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_get_persistent_resource_with_metadata` + interceptor in new development instead of the `post_get_persistent_resource` interceptor. + When both interceptors are used, this `post_get_persistent_resource_with_metadata` interceptor runs after the + `post_get_persistent_resource` interceptor. The (possibly modified) response returned by + `post_get_persistent_resource` will be passed to + `post_get_persistent_resource_with_metadata`. + """ + return response, metadata + async def pre_list_persistent_resources( self, request: persistent_resource_service.ListPersistentResourcesRequest, @@ -250,12 +324,38 @@ async def post_list_persistent_resources( ) -> persistent_resource_service.ListPersistentResourcesResponse: """Post-rpc interceptor for list_persistent_resources - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_persistent_resources_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_list_persistent_resources` interceptor runs + before the `post_list_persistent_resources_with_metadata` interceptor. 
""" return response + async def post_list_persistent_resources_with_metadata( + self, + response: persistent_resource_service.ListPersistentResourcesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + persistent_resource_service.ListPersistentResourcesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_persistent_resources + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_list_persistent_resources_with_metadata` + interceptor in new development instead of the `post_list_persistent_resources` interceptor. + When both interceptors are used, this `post_list_persistent_resources_with_metadata` interceptor runs after the + `post_list_persistent_resources` interceptor. The (possibly modified) response returned by + `post_list_persistent_resources` will be passed to + `post_list_persistent_resources_with_metadata`. + """ + return response, metadata + async def pre_reboot_persistent_resource( self, request: persistent_resource_service.RebootPersistentResourceRequest, @@ -276,12 +376,35 @@ async def post_reboot_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for reboot_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_reboot_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_reboot_persistent_resource` interceptor runs + before the `post_reboot_persistent_resource_with_metadata` interceptor. 
""" return response + async def post_reboot_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for reboot_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_reboot_persistent_resource_with_metadata` + interceptor in new development instead of the `post_reboot_persistent_resource` interceptor. + When both interceptors are used, this `post_reboot_persistent_resource_with_metadata` interceptor runs after the + `post_reboot_persistent_resource` interceptor. The (possibly modified) response returned by + `post_reboot_persistent_resource` will be passed to + `post_reboot_persistent_resource_with_metadata`. + """ + return response, metadata + async def pre_update_persistent_resource( self, request: persistent_resource_service.UpdatePersistentResourceRequest, @@ -302,12 +425,35 @@ async def post_update_persistent_resource( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_persistent_resource - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_persistent_resource_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PersistentResourceService server but before - it is returned to user code. + it is returned to user code. This `post_update_persistent_resource` interceptor runs + before the `post_update_persistent_resource_with_metadata` interceptor. 
""" return response + async def post_update_persistent_resource_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_persistent_resource + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PersistentResourceService server but before it is returned to user code. + + We recommend only using this `post_update_persistent_resource_with_metadata` + interceptor in new development instead of the `post_update_persistent_resource` interceptor. + When both interceptors are used, this `post_update_persistent_resource_with_metadata` interceptor runs after the + `post_update_persistent_resource` interceptor. The (possibly modified) response returned by + `post_update_persistent_resource` will be passed to + `post_update_persistent_resource_with_metadata`. + """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -621,9 +767,9 @@ def __init__( ) self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -807,7 +953,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -854,6 +1000,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_persistent_resource(resp) + response_metadata = [(k, 
str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_persistent_resource_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -965,7 +1117,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1011,6 +1163,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_persistent_resource_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1169,6 +1327,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_get_persistent_resource_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1328,6 +1492,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_persistent_resources(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_persistent_resources_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1446,7 +1616,7 @@ async def 
__call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1493,6 +1663,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_reboot_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_reboot_persistent_resource_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1609,7 +1785,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1656,6 +1832,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_update_persistent_resource(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_update_persistent_resource_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1851,6 +2033,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1987,6 +2173,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1995,6 +2185,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2035,6 +2233,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -2197,6 +2399,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -2225,6 +2431,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ 
-2337,6 +2547,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2345,6 +2559,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2405,6 +2627,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -2567,6 +2793,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -2607,6 +2837,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2719,6 +2953,10 
@@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2727,6 +2965,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2787,6 +3033,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2945,6 +3195,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2985,6 +3239,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -3097,6 +3355,14 @@ def operations_client(self) -> 
AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -3117,6 +3383,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -3165,6 +3435,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3331,6 +3605,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -3363,6 +3641,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -3471,6 +3753,10 @@ def operations_client(self) -> AsyncOperationsRestClient: 
"method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -3479,6 +3765,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -3539,6 +3833,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -3653,7 +3951,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -3804,7 +4101,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -3955,7 +4251,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -4107,7 +4402,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. 
Args: @@ -4265,7 +4559,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -4416,7 +4709,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -4539,7 +4831,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -4662,7 +4953,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -4813,7 +5103,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -4964,7 +5253,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py index d1576ba744..a51b25bcde 100644 --- a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -543,6 +543,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -629,6 +633,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -707,6 +716,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -888,6 +901,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1024,6 +1041,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1032,6 +1053,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1072,6 +1101,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1253,6 +1286,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1281,6 +1318,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1393,6 +1434,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1401,6 +1446,14 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1461,6 +1514,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1642,6 +1699,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1682,6 +1743,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1794,6 +1859,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1802,6 +1871,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1862,6 +1939,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": 
"get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2039,6 +2120,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2079,6 +2164,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2191,6 +2280,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2211,6 +2308,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2259,6 +2360,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2444,6 +2549,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": 
"post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2476,6 +2585,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2584,6 +2697,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2592,6 +2709,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2652,6 +2777,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py b/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py index 85e676d844..3fb034d0b0 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py +++ 
b/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index 007f2053c7..8029eff7bc 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -44,8 +45,6 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model @@ -54,16 +53,19 @@ from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import service_networking from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from 
google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport from .client import PipelineServiceClient @@ -108,6 +110,12 @@ class PipelineServiceAsyncClient: parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) network_path = staticmethod(PipelineServiceClient.network_path) parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + network_attachment_path = staticmethod( + PipelineServiceClient.network_attachment_path + ) + parse_network_attachment_path = staticmethod( + PipelineServiceClient.parse_network_attachment_path + ) pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) parse_pipeline_job_path = staticmethod( PipelineServiceClient.parse_pipeline_job_path @@ -154,7 +162,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: PipelineServiceAsyncClient: The constructed client. 
""" - return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + PipelineServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(PipelineServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -170,7 +181,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: PipelineServiceAsyncClient: The constructed client. """ - return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + PipelineServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(PipelineServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -313,21 +327,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.PipelineServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.PipelineService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.PipelineService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.PipelineService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, 
"get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.PipelineService", + "credentialsType": None, + } + ), ) async def create_training_pipeline( @@ -417,7 +433,10 @@ async def sample_create_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) + flattened_params = [parent, training_pipeline] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -534,7 +553,10 @@ async def sample_get_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -649,7 +671,10 @@ async def sample_list_training_pipelines(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -785,7 +810,10 @@ async def sample_delete_training_pipeline(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -907,7 +935,10 @@ async def sample_cancel_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1034,7 +1065,10 @@ async def sample_create_pipeline_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + flattened_params = [parent, pipeline_job, pipeline_job_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1146,7 +1180,10 @@ async def sample_get_pipeline_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1259,7 +1296,10 @@ async def sample_list_pipeline_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1395,7 +1435,10 @@ async def sample_delete_pipeline_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1533,7 +1576,10 @@ async def sample_batch_delete_pipeline_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, names]) + flattened_params = [parent, names] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1656,7 +1702,10 @@ async def sample_cancel_pipeline_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1788,7 +1837,10 @@ async def sample_batch_cancel_pipeline_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, names]) + flattened_params = [parent, names] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2549,5 +2601,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("PipelineServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 7720e7f25a..627654278f 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -43,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -58,8 +61,6 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1.services.pipeline_service import pagers from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model @@ -68,16 +69,19 @@ from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import service_networking from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore +import google.api_core.operation as gac_operation # type: ignore +import google.api_core.operation_async as operation_async # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import 
google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore +import google.rpc.status_pb2 as status_pb2 # type: ignore from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PipelineServiceGrpcTransport from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport @@ -181,6 +185,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) 
+ """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -385,6 +417,28 @@ def parse_network_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_attachment_path( + project: str, + region: str, + networkattachment: str, + ) -> str: + """Returns a fully-qualified network_attachment string.""" + return "projects/{project}/regions/{region}/networkAttachments/{networkattachment}".format( + project=project, + region=region, + networkattachment=networkattachment, + ) + + @staticmethod + def parse_network_attachment_path(path: str) -> Dict[str, str]: + """Parses a network_attachment path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/regions/(?P.+?)/networkAttachments/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def pipeline_job_path( project: str, @@ -547,12 +601,8 @@ def get_mtls_endpoint_and_cert_source( ) if client_options is None: client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_client_cert = PipelineServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable 
`GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" @@ -560,7 +610,7 @@ def get_mtls_endpoint_and_cert_source( # Figure out the client cert source to use. client_cert_source = None - if use_client_cert == "true": + if use_client_cert: if client_options.client_cert_source: client_cert_source = client_options.client_cert_source elif mtls.has_default_client_cert_source(): @@ -592,20 +642,14 @@ def _read_environment_variables(): google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT is not any of ["auto", "never", "always"]. """ - use_client_cert = os.getenv( - "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" - ).lower() + use_client_cert = PipelineServiceClient._use_client_cert_effective() use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) if use_mtls_endpoint not in ("auto", "never", "always"): raise MutualTLSChannelError( "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + return use_client_cert, use_mtls_endpoint, universe_domain_env @staticmethod def _get_client_cert_source(provided_cert_source, use_cert_flag): @@ -698,6 +742,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. 
+ + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -788,11 +859,9 @@ def __init__( universe_domain_opt = getattr(self._client_options, "universe_domain", None) - ( - self._use_client_cert, - self._use_mtls_endpoint, - self._universe_domain_env, - ) = PipelineServiceClient._read_environment_variables() + self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = ( + PipelineServiceClient._read_environment_variables() + ) self._client_cert_source = PipelineServiceClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) @@ -904,21 +973,25 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.PipelineServiceClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.PipelineService", - "universeDomain": getattr( - self._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.PipelineService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.PipelineService", + 
"universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, + "get_cred_info", + lambda: None, + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.PipelineService", + "credentialsType": None, + } + ), ) def create_training_pipeline( @@ -1008,7 +1081,10 @@ def sample_create_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) + flattened_params = [parent, training_pipeline] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1122,7 +1198,10 @@ def sample_get_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1234,7 +1313,10 @@ def sample_list_training_pipelines(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1367,7 +1449,10 @@ def sample_delete_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1486,7 +1571,10 @@ def sample_cancel_training_pipeline(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1610,7 +1698,10 @@ def sample_create_pipeline_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + flattened_params = [parent, pipeline_job, pipeline_job_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1719,7 +1810,10 @@ def sample_get_pipeline_job(): # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1829,7 +1923,10 @@ def sample_list_pipeline_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1962,7 +2059,10 @@ def sample_delete_pipeline_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2097,7 +2197,10 @@ def sample_batch_delete_pipeline_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, names]) + flattened_params = [parent, names] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2219,7 +2322,10 @@ def sample_cancel_pipeline_job(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2348,7 +2454,10 @@ def sample_batch_cancel_pipeline_jobs(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, names]) + flattened_params = [parent, names] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2457,16 +2566,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -2512,16 +2625,20 @@ def get_operation( # Validate the universe domain. 
self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -2684,16 +2801,20 @@ def wait_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def set_iam_policy( self, @@ -2805,16 +2926,20 @@ def set_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_iam_policy( self, @@ -2927,16 +3052,20 @@ def get_iam_policy( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. 
- return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def test_iam_permissions( self, @@ -2987,16 +3116,20 @@ def test_iam_permissions( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_location( self, @@ -3042,16 +3175,20 @@ def get_location( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def list_locations( self, @@ -3097,21 +3234,27 @@ def list_locations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. 
+ return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("PipelineServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py index bf20ba5084..7e95e05112 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py index 3e78c35de2..516241d5cc 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index 2b03f066bb..07c8cea588 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.aiplatform_v1.types import pipeline_job from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job @@ -36,12 +37,15 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class PipelineServiceTransport(abc.ABC): """Abstract transport class for PipelineService.""" @@ -73,9 +77,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. @@ -88,8 +93,6 @@ def __init__( be used for service account credentials. """ - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - # Save the scopes. self._scopes = scopes if not hasattr(self, "_ignore_credentials"): @@ -104,11 +107,16 @@ def __init__( if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( - credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( - **scopes_kwargs, quota_project_id=quota_project_id + scopes=scopes, + quota_project_id=quota_project_id, + default_scopes=self.AUTH_SCOPES, ) # Don't apply audience if the credentials file passed from user. 
if hasattr(credentials, "with_gdch_audience"): @@ -409,13 +417,19 @@ def get_operation( @property def cancel_operation( self, - ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + None, + ]: raise NotImplementedError() @property def delete_operation( self, - ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + None, + ]: raise NotImplementedError() @property diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 8d08918c2f..f550576884 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -40,7 +40,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO try: @@ -80,12 +80,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.cloud.aiplatform.v1.PipelineService", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() @@ -165,9 +164,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -301,9 +301,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. 
A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -641,12 +642,12 @@ def batch_delete_pipeline_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_delete_pipeline_jobs" not in self._stubs: - self._stubs[ - "batch_delete_pipeline_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PipelineService/BatchDeletePipelineJobs", - request_serializer=pipeline_service.BatchDeletePipelineJobsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["batch_delete_pipeline_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/BatchDeletePipelineJobs", + request_serializer=pipeline_service.BatchDeletePipelineJobsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["batch_delete_pipeline_jobs"] @@ -717,12 +718,12 @@ def batch_cancel_pipeline_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_cancel_pipeline_jobs" not in self._stubs: - self._stubs[ - "batch_cancel_pipeline_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PipelineService/BatchCancelPipelineJobs", - request_serializer=pipeline_service.BatchCancelPipelineJobsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["batch_cancel_pipeline_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/BatchCancelPipelineJobs", + request_serializer=pipeline_service.BatchCancelPipelineJobsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["batch_cancel_pipeline_jobs"] diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index f3d7b7275a..220b305912 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO from .grpc import PipelineServiceGrpcTransport @@ -161,8 +161,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -213,9 +214,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -655,12 +657,12 @@ def batch_delete_pipeline_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_delete_pipeline_jobs" not in self._stubs: - self._stubs[ - "batch_delete_pipeline_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PipelineService/BatchDeletePipelineJobs", - request_serializer=pipeline_service.BatchDeletePipelineJobsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["batch_delete_pipeline_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/BatchDeletePipelineJobs", + request_serializer=pipeline_service.BatchDeletePipelineJobsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["batch_delete_pipeline_jobs"] @@ -734,12 +736,12 @@ def batch_cancel_pipeline_jobs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_cancel_pipeline_jobs" not in self._stubs: - self._stubs[ - "batch_cancel_pipeline_jobs" - ] = self._logged_channel.unary_unary( - "/google.cloud.aiplatform.v1.PipelineService/BatchCancelPipelineJobs", - request_serializer=pipeline_service.BatchCancelPipelineJobsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, + self._stubs["batch_cancel_pipeline_jobs"] = ( + self._logged_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/BatchCancelPipelineJobs", + request_serializer=pipeline_service.BatchCancelPipelineJobsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) ) return self._stubs["batch_cancel_pipeline_jobs"] diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py index 9df07cc5a8..744f8720ae 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 
2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -41,7 +42,7 @@ from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -68,6 +69,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class PipelineServiceRestInterceptor: """Interceptor for PipelineService. @@ -198,12 +202,35 @@ def post_batch_cancel_pipeline_jobs( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_cancel_pipeline_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_cancel_pipeline_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_batch_cancel_pipeline_jobs` interceptor runs + before the `post_batch_cancel_pipeline_jobs_with_metadata` interceptor. 
""" return response + def post_batch_cancel_pipeline_jobs_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_cancel_pipeline_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_batch_cancel_pipeline_jobs_with_metadata` + interceptor in new development instead of the `post_batch_cancel_pipeline_jobs` interceptor. + When both interceptors are used, this `post_batch_cancel_pipeline_jobs_with_metadata` interceptor runs after the + `post_batch_cancel_pipeline_jobs` interceptor. The (possibly modified) response returned by + `post_batch_cancel_pipeline_jobs` will be passed to + `post_batch_cancel_pipeline_jobs_with_metadata`. + """ + return response, metadata + def pre_batch_delete_pipeline_jobs( self, request: pipeline_service.BatchDeletePipelineJobsRequest, @@ -224,12 +251,35 @@ def post_batch_delete_pipeline_jobs( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_delete_pipeline_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_delete_pipeline_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_batch_delete_pipeline_jobs` interceptor runs + before the `post_batch_delete_pipeline_jobs_with_metadata` interceptor. 
""" return response + def post_batch_delete_pipeline_jobs_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_delete_pipeline_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_batch_delete_pipeline_jobs_with_metadata` + interceptor in new development instead of the `post_batch_delete_pipeline_jobs` interceptor. + When both interceptors are used, this `post_batch_delete_pipeline_jobs_with_metadata` interceptor runs after the + `post_batch_delete_pipeline_jobs` interceptor. The (possibly modified) response returned by + `post_batch_delete_pipeline_jobs` will be passed to + `post_batch_delete_pipeline_jobs_with_metadata`. + """ + return response, metadata + def pre_cancel_pipeline_job( self, request: pipeline_service.CancelPipelineJobRequest, @@ -280,12 +330,35 @@ def post_create_pipeline_job( ) -> gca_pipeline_job.PipelineJob: """Post-rpc interceptor for create_pipeline_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_pipeline_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_create_pipeline_job` interceptor runs + before the `post_create_pipeline_job_with_metadata` interceptor. 
""" return response + def post_create_pipeline_job_with_metadata( + self, + response: gca_pipeline_job.PipelineJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_pipeline_job.PipelineJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_pipeline_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_create_pipeline_job_with_metadata` + interceptor in new development instead of the `post_create_pipeline_job` interceptor. + When both interceptors are used, this `post_create_pipeline_job_with_metadata` interceptor runs after the + `post_create_pipeline_job` interceptor. The (possibly modified) response returned by + `post_create_pipeline_job` will be passed to + `post_create_pipeline_job_with_metadata`. + """ + return response, metadata + def pre_create_training_pipeline( self, request: pipeline_service.CreateTrainingPipelineRequest, @@ -306,12 +379,37 @@ def post_create_training_pipeline( ) -> gca_training_pipeline.TrainingPipeline: """Post-rpc interceptor for create_training_pipeline - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_training_pipeline_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_create_training_pipeline` interceptor runs + before the `post_create_training_pipeline_with_metadata` interceptor. 
""" return response + def post_create_training_pipeline_with_metadata( + self, + response: gca_training_pipeline.TrainingPipeline, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_training_pipeline.TrainingPipeline, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_training_pipeline + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_create_training_pipeline_with_metadata` + interceptor in new development instead of the `post_create_training_pipeline` interceptor. + When both interceptors are used, this `post_create_training_pipeline_with_metadata` interceptor runs after the + `post_create_training_pipeline` interceptor. The (possibly modified) response returned by + `post_create_training_pipeline` will be passed to + `post_create_training_pipeline_with_metadata`. + """ + return response, metadata + def pre_delete_pipeline_job( self, request: pipeline_service.DeletePipelineJobRequest, @@ -332,12 +430,35 @@ def post_delete_pipeline_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_pipeline_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_pipeline_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_delete_pipeline_job` interceptor runs + before the `post_delete_pipeline_job_with_metadata` interceptor. 
""" return response + def post_delete_pipeline_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_pipeline_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_delete_pipeline_job_with_metadata` + interceptor in new development instead of the `post_delete_pipeline_job` interceptor. + When both interceptors are used, this `post_delete_pipeline_job_with_metadata` interceptor runs after the + `post_delete_pipeline_job` interceptor. The (possibly modified) response returned by + `post_delete_pipeline_job` will be passed to + `post_delete_pipeline_job_with_metadata`. + """ + return response, metadata + def pre_delete_training_pipeline( self, request: pipeline_service.DeleteTrainingPipelineRequest, @@ -358,12 +479,35 @@ def post_delete_training_pipeline( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_training_pipeline - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_training_pipeline_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_delete_training_pipeline` interceptor runs + before the `post_delete_training_pipeline_with_metadata` interceptor. 
""" return response + def post_delete_training_pipeline_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_training_pipeline + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_delete_training_pipeline_with_metadata` + interceptor in new development instead of the `post_delete_training_pipeline` interceptor. + When both interceptors are used, this `post_delete_training_pipeline_with_metadata` interceptor runs after the + `post_delete_training_pipeline` interceptor. The (possibly modified) response returned by + `post_delete_training_pipeline` will be passed to + `post_delete_training_pipeline_with_metadata`. + """ + return response, metadata + def pre_get_pipeline_job( self, request: pipeline_service.GetPipelineJobRequest, @@ -383,12 +527,35 @@ def post_get_pipeline_job( ) -> pipeline_job.PipelineJob: """Post-rpc interceptor for get_pipeline_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_pipeline_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_get_pipeline_job` interceptor runs + before the `post_get_pipeline_job_with_metadata` interceptor. 
""" return response + def post_get_pipeline_job_with_metadata( + self, + response: pipeline_job.PipelineJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[pipeline_job.PipelineJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_pipeline_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_get_pipeline_job_with_metadata` + interceptor in new development instead of the `post_get_pipeline_job` interceptor. + When both interceptors are used, this `post_get_pipeline_job_with_metadata` interceptor runs after the + `post_get_pipeline_job` interceptor. The (possibly modified) response returned by + `post_get_pipeline_job` will be passed to + `post_get_pipeline_job_with_metadata`. + """ + return response, metadata + def pre_get_training_pipeline( self, request: pipeline_service.GetTrainingPipelineRequest, @@ -409,12 +576,37 @@ def post_get_training_pipeline( ) -> training_pipeline.TrainingPipeline: """Post-rpc interceptor for get_training_pipeline - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_training_pipeline_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_get_training_pipeline` interceptor runs + before the `post_get_training_pipeline_with_metadata` interceptor. 
""" return response + def post_get_training_pipeline_with_metadata( + self, + response: training_pipeline.TrainingPipeline, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + training_pipeline.TrainingPipeline, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_training_pipeline + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_get_training_pipeline_with_metadata` + interceptor in new development instead of the `post_get_training_pipeline` interceptor. + When both interceptors are used, this `post_get_training_pipeline_with_metadata` interceptor runs after the + `post_get_training_pipeline` interceptor. The (possibly modified) response returned by + `post_get_training_pipeline` will be passed to + `post_get_training_pipeline_with_metadata`. + """ + return response, metadata + def pre_list_pipeline_jobs( self, request: pipeline_service.ListPipelineJobsRequest, @@ -435,12 +627,38 @@ def post_list_pipeline_jobs( ) -> pipeline_service.ListPipelineJobsResponse: """Post-rpc interceptor for list_pipeline_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_pipeline_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_list_pipeline_jobs` interceptor runs + before the `post_list_pipeline_jobs_with_metadata` interceptor. 
""" return response + def post_list_pipeline_jobs_with_metadata( + self, + response: pipeline_service.ListPipelineJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + pipeline_service.ListPipelineJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_pipeline_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_list_pipeline_jobs_with_metadata` + interceptor in new development instead of the `post_list_pipeline_jobs` interceptor. + When both interceptors are used, this `post_list_pipeline_jobs_with_metadata` interceptor runs after the + `post_list_pipeline_jobs` interceptor. The (possibly modified) response returned by + `post_list_pipeline_jobs` will be passed to + `post_list_pipeline_jobs_with_metadata`. + """ + return response, metadata + def pre_list_training_pipelines( self, request: pipeline_service.ListTrainingPipelinesRequest, @@ -461,12 +679,38 @@ def post_list_training_pipelines( ) -> pipeline_service.ListTrainingPipelinesResponse: """Post-rpc interceptor for list_training_pipelines - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_training_pipelines_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_list_training_pipelines` interceptor runs + before the `post_list_training_pipelines_with_metadata` interceptor. 
""" return response + def post_list_training_pipelines_with_metadata( + self, + response: pipeline_service.ListTrainingPipelinesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + pipeline_service.ListTrainingPipelinesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_training_pipelines + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_list_training_pipelines_with_metadata` + interceptor in new development instead of the `post_list_training_pipelines` interceptor. + When both interceptors are used, this `post_list_training_pipelines_with_metadata` interceptor runs after the + `post_list_training_pipelines` interceptor. The (possibly modified) response returned by + `post_list_training_pipelines` will be passed to + `post_list_training_pipelines_with_metadata`. + """ + return response, metadata + def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -759,9 +1003,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -972,6 +1217,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1108,6 +1357,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1116,6 +1369,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1156,6 +1417,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -1318,6 +1583,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + 
{ + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1346,6 +1615,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1458,6 +1731,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1466,6 +1743,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1526,6 +1811,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -1688,6 +1977,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1728,6 +2021,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -1840,6 +2137,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1848,6 +2149,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1908,6 +2217,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -2066,6 +2379,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2106,6 +2423,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2218,6 +2539,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2238,6 +2567,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2286,6 +2619,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2452,6 +2789,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2484,6 +2825,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2592,6 +2937,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2600,6 +2949,14 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2660,6 +3017,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -2769,7 +3130,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2811,6 +3172,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_cancel_pipeline_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_cancel_pipeline_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2924,7 +3289,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2966,6 +3331,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_batch_delete_pipeline_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_delete_pipeline_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3071,7 +3440,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3188,7 +3557,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3355,6 +3724,10 @@ def __call__( json_format.Parse(response.content, pb_resp, 
ignore_unknown_fields=True) resp = self._interceptor.post_create_pipeline_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_pipeline_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3514,6 +3887,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_create_training_pipeline(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_training_pipeline_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3623,7 +4000,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3662,6 +4039,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_pipeline_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_pipeline_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3769,7 +4150,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -3810,6 +4191,10 @@ def __call__( json_format.Parse(response.content, resp, ignore_unknown_fields=True) resp = self._interceptor.post_delete_training_pipeline(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_delete_training_pipeline_with_metadata( + 
resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -3956,6 +4341,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_pipeline_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_pipeline_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4107,6 +4496,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_get_training_pipeline(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_training_pipeline_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4255,6 +4648,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_pipeline_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_pipeline_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4406,6 +4803,10 @@ def __call__( json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) resp = self._interceptor.post_list_training_pipelines(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_training_pipelines_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -4590,7 +4991,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - 
r"""Call the get location method over HTTP. Args: @@ -4732,7 +5132,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -4874,7 +5273,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -5017,7 +5415,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -5165,7 +5562,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -5309,7 +5705,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. Args: @@ -5425,7 +5820,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5541,7 +5935,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -5683,7 +6076,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -5825,7 +6217,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. 
Args: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py index e0a4673ba0..be972600d2 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ from google.api_core import retry_async as retries from google.api_core import rest_helpers from google.api_core import rest_streaming_async # type: ignore - +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -55,7 +55,7 @@ from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -86,6 +86,9 @@ rest_version=f"google-auth@{google.auth.__version__}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class AsyncPipelineServiceRestInterceptor: """Asynchronous Interceptor for PipelineService. @@ -216,12 +219,35 @@ async def post_batch_cancel_pipeline_jobs( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_cancel_pipeline_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_cancel_pipeline_jobs_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_batch_cancel_pipeline_jobs` interceptor runs + before the `post_batch_cancel_pipeline_jobs_with_metadata` interceptor. """ return response + async def post_batch_cancel_pipeline_jobs_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_cancel_pipeline_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_batch_cancel_pipeline_jobs_with_metadata` + interceptor in new development instead of the `post_batch_cancel_pipeline_jobs` interceptor. + When both interceptors are used, this `post_batch_cancel_pipeline_jobs_with_metadata` interceptor runs after the + `post_batch_cancel_pipeline_jobs` interceptor. The (possibly modified) response returned by + `post_batch_cancel_pipeline_jobs` will be passed to + `post_batch_cancel_pipeline_jobs_with_metadata`. + """ + return response, metadata + async def pre_batch_delete_pipeline_jobs( self, request: pipeline_service.BatchDeletePipelineJobsRequest, @@ -242,12 +268,35 @@ async def post_batch_delete_pipeline_jobs( ) -> operations_pb2.Operation: """Post-rpc interceptor for batch_delete_pipeline_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_delete_pipeline_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. 
This `post_batch_delete_pipeline_jobs` interceptor runs + before the `post_batch_delete_pipeline_jobs_with_metadata` interceptor. """ return response + async def post_batch_delete_pipeline_jobs_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for batch_delete_pipeline_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_batch_delete_pipeline_jobs_with_metadata` + interceptor in new development instead of the `post_batch_delete_pipeline_jobs` interceptor. + When both interceptors are used, this `post_batch_delete_pipeline_jobs_with_metadata` interceptor runs after the + `post_batch_delete_pipeline_jobs` interceptor. The (possibly modified) response returned by + `post_batch_delete_pipeline_jobs` will be passed to + `post_batch_delete_pipeline_jobs_with_metadata`. + """ + return response, metadata + async def pre_cancel_pipeline_job( self, request: pipeline_service.CancelPipelineJobRequest, @@ -298,12 +347,35 @@ async def post_create_pipeline_job( ) -> gca_pipeline_job.PipelineJob: """Post-rpc interceptor for create_pipeline_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_pipeline_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_create_pipeline_job` interceptor runs + before the `post_create_pipeline_job_with_metadata` interceptor. 
""" return response + async def post_create_pipeline_job_with_metadata( + self, + response: gca_pipeline_job.PipelineJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gca_pipeline_job.PipelineJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_pipeline_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_create_pipeline_job_with_metadata` + interceptor in new development instead of the `post_create_pipeline_job` interceptor. + When both interceptors are used, this `post_create_pipeline_job_with_metadata` interceptor runs after the + `post_create_pipeline_job` interceptor. The (possibly modified) response returned by + `post_create_pipeline_job` will be passed to + `post_create_pipeline_job_with_metadata`. + """ + return response, metadata + async def pre_create_training_pipeline( self, request: pipeline_service.CreateTrainingPipelineRequest, @@ -324,12 +396,37 @@ async def post_create_training_pipeline( ) -> gca_training_pipeline.TrainingPipeline: """Post-rpc interceptor for create_training_pipeline - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_training_pipeline_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_create_training_pipeline` interceptor runs + before the `post_create_training_pipeline_with_metadata` interceptor. 
""" return response + async def post_create_training_pipeline_with_metadata( + self, + response: gca_training_pipeline.TrainingPipeline, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gca_training_pipeline.TrainingPipeline, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_training_pipeline + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_create_training_pipeline_with_metadata` + interceptor in new development instead of the `post_create_training_pipeline` interceptor. + When both interceptors are used, this `post_create_training_pipeline_with_metadata` interceptor runs after the + `post_create_training_pipeline` interceptor. The (possibly modified) response returned by + `post_create_training_pipeline` will be passed to + `post_create_training_pipeline_with_metadata`. + """ + return response, metadata + async def pre_delete_pipeline_job( self, request: pipeline_service.DeletePipelineJobRequest, @@ -350,12 +447,35 @@ async def post_delete_pipeline_job( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_pipeline_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_pipeline_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_delete_pipeline_job` interceptor runs + before the `post_delete_pipeline_job_with_metadata` interceptor. 
""" return response + async def post_delete_pipeline_job_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_pipeline_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_delete_pipeline_job_with_metadata` + interceptor in new development instead of the `post_delete_pipeline_job` interceptor. + When both interceptors are used, this `post_delete_pipeline_job_with_metadata` interceptor runs after the + `post_delete_pipeline_job` interceptor. The (possibly modified) response returned by + `post_delete_pipeline_job` will be passed to + `post_delete_pipeline_job_with_metadata`. + """ + return response, metadata + async def pre_delete_training_pipeline( self, request: pipeline_service.DeleteTrainingPipelineRequest, @@ -376,12 +496,35 @@ async def post_delete_training_pipeline( ) -> operations_pb2.Operation: """Post-rpc interceptor for delete_training_pipeline - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_delete_training_pipeline_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_delete_training_pipeline` interceptor runs + before the `post_delete_training_pipeline_with_metadata` interceptor. 
""" return response + async def post_delete_training_pipeline_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for delete_training_pipeline + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_delete_training_pipeline_with_metadata` + interceptor in new development instead of the `post_delete_training_pipeline` interceptor. + When both interceptors are used, this `post_delete_training_pipeline_with_metadata` interceptor runs after the + `post_delete_training_pipeline` interceptor. The (possibly modified) response returned by + `post_delete_training_pipeline` will be passed to + `post_delete_training_pipeline_with_metadata`. + """ + return response, metadata + async def pre_get_pipeline_job( self, request: pipeline_service.GetPipelineJobRequest, @@ -401,12 +544,35 @@ async def post_get_pipeline_job( ) -> pipeline_job.PipelineJob: """Post-rpc interceptor for get_pipeline_job - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_pipeline_job_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_get_pipeline_job` interceptor runs + before the `post_get_pipeline_job_with_metadata` interceptor. 
""" return response + async def post_get_pipeline_job_with_metadata( + self, + response: pipeline_job.PipelineJob, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[pipeline_job.PipelineJob, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_pipeline_job + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_get_pipeline_job_with_metadata` + interceptor in new development instead of the `post_get_pipeline_job` interceptor. + When both interceptors are used, this `post_get_pipeline_job_with_metadata` interceptor runs after the + `post_get_pipeline_job` interceptor. The (possibly modified) response returned by + `post_get_pipeline_job` will be passed to + `post_get_pipeline_job_with_metadata`. + """ + return response, metadata + async def pre_get_training_pipeline( self, request: pipeline_service.GetTrainingPipelineRequest, @@ -427,12 +593,37 @@ async def post_get_training_pipeline( ) -> training_pipeline.TrainingPipeline: """Post-rpc interceptor for get_training_pipeline - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_training_pipeline_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_get_training_pipeline` interceptor runs + before the `post_get_training_pipeline_with_metadata` interceptor. 
""" return response + async def post_get_training_pipeline_with_metadata( + self, + response: training_pipeline.TrainingPipeline, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + training_pipeline.TrainingPipeline, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_training_pipeline + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_get_training_pipeline_with_metadata` + interceptor in new development instead of the `post_get_training_pipeline` interceptor. + When both interceptors are used, this `post_get_training_pipeline_with_metadata` interceptor runs after the + `post_get_training_pipeline` interceptor. The (possibly modified) response returned by + `post_get_training_pipeline` will be passed to + `post_get_training_pipeline_with_metadata`. + """ + return response, metadata + async def pre_list_pipeline_jobs( self, request: pipeline_service.ListPipelineJobsRequest, @@ -453,12 +644,38 @@ async def post_list_pipeline_jobs( ) -> pipeline_service.ListPipelineJobsResponse: """Post-rpc interceptor for list_pipeline_jobs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_pipeline_jobs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_list_pipeline_jobs` interceptor runs + before the `post_list_pipeline_jobs_with_metadata` interceptor. 
""" return response + async def post_list_pipeline_jobs_with_metadata( + self, + response: pipeline_service.ListPipelineJobsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + pipeline_service.ListPipelineJobsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_pipeline_jobs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_list_pipeline_jobs_with_metadata` + interceptor in new development instead of the `post_list_pipeline_jobs` interceptor. + When both interceptors are used, this `post_list_pipeline_jobs_with_metadata` interceptor runs after the + `post_list_pipeline_jobs` interceptor. The (possibly modified) response returned by + `post_list_pipeline_jobs` will be passed to + `post_list_pipeline_jobs_with_metadata`. + """ + return response, metadata + async def pre_list_training_pipelines( self, request: pipeline_service.ListTrainingPipelinesRequest, @@ -479,12 +696,38 @@ async def post_list_training_pipelines( ) -> pipeline_service.ListTrainingPipelinesResponse: """Post-rpc interceptor for list_training_pipelines - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_training_pipelines_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the PipelineService server but before - it is returned to user code. + it is returned to user code. This `post_list_training_pipelines` interceptor runs + before the `post_list_training_pipelines_with_metadata` interceptor. 
""" return response + async def post_list_training_pipelines_with_metadata( + self, + response: pipeline_service.ListTrainingPipelinesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + pipeline_service.ListTrainingPipelinesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_training_pipelines + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the PipelineService server but before it is returned to user code. + + We recommend only using this `post_list_training_pipelines_with_metadata` + interceptor in new development instead of the `post_list_training_pipelines` interceptor. + When both interceptors are used, this `post_list_training_pipelines_with_metadata` interceptor runs after the + `post_list_training_pipelines` interceptor. The (possibly modified) response returned by + `post_list_training_pipelines` will be passed to + `post_list_training_pipelines_with_metadata`. 
+ """ + return response, metadata + async def pre_get_location( self, request: locations_pb2.GetLocationRequest, @@ -796,9 +1039,9 @@ def __init__( self._interceptor = interceptor or AsyncPipelineServiceRestInterceptor() self._wrap_with_kind = True self._prep_wrapped_messages(client_info) - self._operations_client: Optional[ - operations_v1.AsyncOperationsRestClient - ] = None + self._operations_client: Optional[operations_v1.AsyncOperationsRestClient] = ( + None + ) def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" @@ -1010,7 +1253,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1057,6 +1300,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_cancel_pipeline_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_batch_cancel_pipeline_jobs_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1171,7 +1420,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1218,6 +1467,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_batch_delete_pipeline_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_batch_delete_pipeline_jobs_with_metadata( + resp, response_metadata + ) + ) if 
CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1324,7 +1579,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1447,7 +1702,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1623,6 +1878,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_pipeline_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_create_pipeline_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1786,6 +2045,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_create_training_pipeline(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_create_training_pipeline_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -1896,7 +2161,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -1942,6 +2207,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_pipeline_job(resp) + response_metadata = [(k, 
str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_delete_pipeline_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2050,7 +2319,7 @@ async def __call__( ) method = transcoded_request["method"] try: - request_payload = json_format.MessageToJson(request) + request_payload = type(request).to_json(request) except: request_payload = None http_request = { @@ -2096,6 +2365,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_delete_training_pipeline(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_delete_training_pipeline_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2251,6 +2526,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_pipeline_job(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_pipeline_job_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2408,6 +2687,10 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_get_training_pipeline(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_get_training_pipeline_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2565,6 +2848,10 @@ async def __call__( content = await response.read() 
json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_pipeline_jobs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = await self._interceptor.post_list_pipeline_jobs_with_metadata( + resp, response_metadata + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2720,6 +3007,12 @@ async def __call__( content = await response.read() json_format.Parse(content, pb_resp, ignore_unknown_fields=True) resp = await self._interceptor.post_list_training_pipelines(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = ( + await self._interceptor.post_list_training_pipelines_with_metadata( + resp, response_metadata + ) + ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER @@ -2917,6 +3210,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -3053,6 +3350,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -3061,6 +3362,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3101,6 +3410,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ], "google.longrunning.Operations.DeleteOperation": [ { @@ -3263,6 +3576,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -3291,6 +3608,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -3403,6 +3724,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3411,6 +3736,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3471,6 +3804,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.GetOperation": [ { @@ -3633,6 +3970,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -3673,6 +4014,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -3785,6 +4130,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -3793,6 +4142,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3853,6 +4210,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ], "google.longrunning.Operations.ListOperations": [ { @@ -4011,6 +4372,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -4051,6 +4416,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -4163,6 +4532,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -4183,6 +4560,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -4231,6 +4612,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -4397,6 +4782,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -4429,6 +4818,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -4537,6 +4930,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -4545,6 +4942,14 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -4605,6 +5010,10 @@ def operations_client(self) -> AsyncOperationsRestClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ], } @@ -4758,7 +5167,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.Location: - r"""Call the get location method over HTTP. Args: @@ -4911,7 +5319,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> locations_pb2.ListLocationsResponse: - r"""Call the list locations method over HTTP. Args: @@ -5064,7 +5471,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. Args: @@ -5218,7 +5624,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. Args: @@ -5376,7 +5781,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. Args: @@ -5527,7 +5931,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -5652,7 +6055,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -5777,7 +6179,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -5930,7 +6331,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: @@ -6083,7 +6483,6 @@ async def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the wait operation method over HTTP. Args: diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py index 27ab4d3124..9df3aa4873 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -32,7 +32,7 @@ from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore +import google.protobuf.empty_pb2 as empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore @@ -843,6 +843,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:getIamPolicy", + }, ] return http_options @@ -929,6 +933,11 @@ def _get_http_options(): "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1007,6 +1016,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureGroups/*}:testIamPermissions", + }, ] return http_options @@ -1188,6 +1201,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", @@ -1324,6 +1341,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:cancel", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", @@ -1332,6 +1353,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1372,6 +1401,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:cancel", + }, ] return http_options @@ -1553,6 +1586,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", @@ -1581,6 +1618,10 @@ def _get_http_options(): "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", @@ -1693,6 +1734,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -1701,6 +1746,14 @@ def 
_get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1761,6 +1814,10 @@ def _get_http_options(): "method": "delete", "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -1942,6 +1999,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", @@ -1982,6 +2043,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/operations/*}", @@ -2094,6 +2159,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", @@ -2102,6 +2171,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2162,6 +2239,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}", + }, ] return http_options @@ -2339,6 +2420,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", @@ -2379,6 +2464,10 @@ def _get_http_options(): "method": "get", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*}/operations", @@ -2491,6 +2580,14 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", @@ -2511,6 +2608,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/ragCorpora/*}/operations", @@ -2559,6 +2660,10 @@ def _get_http_options(): "method": "get", "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "get", + "uri": "/v1/{name=reasoningEngines/*/sessions/*}/operations", + }, ] return http_options @@ -2744,6 +2849,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", @@ -2776,6 +2885,10 @@ def _get_http_options(): "method": "post", "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/featureMonitors/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", @@ -2884,6 +2997,10 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/ragEngineConfig/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", @@ -2892,6 +3009,14 @@ def _get_http_options(): "method": "post", "uri": "/v1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/reasoningEngines/*/sessions/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", @@ -2952,6 +3077,10 @@ def _get_http_options(): "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=reasoningEngines/*/sessions/*/operations/*}:wait", + }, ] return http_options diff --git a/google/cloud/aiplatform_v1/services/prediction_service/__init__.py b/google/cloud/aiplatform_v1/services/prediction_service/__init__.py index bde71cacd5..4d2d0a66bb 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/__init__.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index 2c269eb6b4..6d84abe2c2 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -40,6 +40,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -47,17 +48,20 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import content +from google.cloud.aiplatform_v1.types import content as gca_content from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.aiplatform_v1.types import types +from google.cloud.aiplatform_v1.types import usage_metadata from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.any_pb2 as any_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport from .client import PredictionServiceClient @@ -84,12 +88,18 @@ class PredictionServiceAsyncClient: _DEFAULT_ENDPOINT_TEMPLATE = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE _DEFAULT_UNIVERSE = PredictionServiceClient._DEFAULT_UNIVERSE + cached_content_path = staticmethod(PredictionServiceClient.cached_content_path) + parse_cached_content_path = staticmethod( + PredictionServiceClient.parse_cached_content_path + ) endpoint_path = 
staticmethod(PredictionServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) model_path = staticmethod(PredictionServiceClient.model_path) parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) rag_corpus_path = staticmethod(PredictionServiceClient.rag_corpus_path) parse_rag_corpus_path = staticmethod(PredictionServiceClient.parse_rag_corpus_path) + template_path = staticmethod(PredictionServiceClient.template_path) + parse_template_path = staticmethod(PredictionServiceClient.parse_template_path) common_billing_account_path = staticmethod( PredictionServiceClient.common_billing_account_path ) @@ -128,7 +138,10 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): Returns: PredictionServiceAsyncClient: The constructed client. """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + sa_info_func = ( + PredictionServiceClient.from_service_account_info.__func__ # type: ignore + ) + return sa_info_func(PredictionServiceAsyncClient, info, *args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -144,7 +157,10 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): Returns: PredictionServiceAsyncClient: The constructed client. 
""" - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + sa_file_func = ( + PredictionServiceClient.from_service_account_file.__func__ # type: ignore + ) + return sa_file_func(PredictionServiceAsyncClient, filename, *args, **kwargs) from_service_account_json = from_service_account_file @@ -289,21 +305,23 @@ def __init__( ): # pragma: NO COVER _LOGGER.debug( "Created client `google.cloud.aiplatform_v1.PredictionServiceAsyncClient`.", - extra={ - "serviceName": "google.cloud.aiplatform.v1.PredictionService", - "universeDomain": getattr( - self._client._transport._credentials, "universe_domain", "" - ), - "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", - "credentialsInfo": getattr( - self.transport._credentials, "get_cred_info", lambda: None - )(), - } - if hasattr(self._client._transport, "_credentials") - else { - "serviceName": "google.cloud.aiplatform.v1.PredictionService", - "credentialsType": None, - }, + extra=( + { + "serviceName": "google.cloud.aiplatform.v1.PredictionService", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.cloud.aiplatform.v1.PredictionService", + "credentialsType": None, + } + ), ) async def predict( @@ -405,7 +423,10 @@ async def sample_predict(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, instances, parameters]) + flattened_params = [endpoint, instances, parameters] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -464,13 +485,13 @@ async def raw_predict( The response includes the following HTTP headers: - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served - this prediction. + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served + this prediction. - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - that served this prediction. + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] that + served this prediction. .. code-block:: python @@ -597,7 +618,10 @@ async def sample_raw_predict(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, http_body]) + flattened_params = [endpoint, http_body] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -764,7 +788,10 @@ async def sample_stream_raw_predict(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, http_body]) + flattened_params = [endpoint, http_body] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1588,7 +1615,10 @@ async def sample_explain(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + flattened_params = [endpoint, instances, parameters, deployed_model_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1723,7 +1753,10 @@ async def sample_generate_content(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, contents]) + flattened_params = [model, contents] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1858,7 +1891,10 @@ async def sample_stream_generate_content(): # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([model, contents]) + flattened_params = [model, contents] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1903,6 +1939,127 @@ async def sample_stream_generate_content(): # Done; return the response. return response + async def embed_content( + self, + request: Optional[Union[prediction_service.EmbedContentRequest, dict]] = None, + *, + model: Optional[str] = None, + content: Optional[gca_content.Content] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> prediction_service.EmbedContentResponse: + r"""Embed content with multimodal inputs. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_embed_content(): + # Create a client + client = aiplatform_v1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.EmbedContentRequest( + ) + + # Make the request + response = await client.embed_content(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.EmbedContentRequest, dict]]): + The request object. Request message for + [PredictionService.EmbedContent][google.cloud.aiplatform.v1.PredictionService.EmbedContent]. + model (:class:`str`): + Required. 
The name of the publisher model requested to + serve the prediction. Format: + ``projects/{project}/locations/{location}/publishers/*/models/*`` + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + content (:class:`google.cloud.aiplatform_v1.types.Content`): + Required. Input content to be + embedded. + + This corresponds to the ``content`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.aiplatform_v1.types.EmbedContentResponse: + Response message for + [PredictionService.EmbedContent][google.cloud.aiplatform.v1.PredictionService.EmbedContent]. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [model, content] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, prediction_service.EmbedContentRequest): + request = prediction_service.EmbedContentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if model is not None: + request.model = model + if content is not None: + request.content = content + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.embed_content + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, @@ -2611,5 +2768,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("PredictionServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 5ba11e3715..3105773487 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json import logging as std_logging import os import re @@ -45,6 +47,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -60,17 +63,20 @@ _LOGGER = std_logging.getLogger(__name__) -from google.api import httpbody_pb2 # type: ignore from google.cloud.aiplatform_v1.types import content +from google.cloud.aiplatform_v1.types import content as gca_content from google.cloud.aiplatform_v1.types import explanation from google.cloud.aiplatform_v1.types import prediction_service from google.cloud.aiplatform_v1.types import types +from google.cloud.aiplatform_v1.types import usage_metadata from google.cloud.location import locations_pb2 # type: ignore from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore +import google.api.httpbody_pb2 as httpbody_pb2 # type: ignore +import google.protobuf.any_pb2 as any_pb2 # type: ignore +import google.protobuf.struct_pb2 as struct_pb2 # type: ignore +import google.protobuf.timestamp_pb2 as timestamp_pb2 # type: ignore from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import PredictionServiceGrpcTransport from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport @@ -170,6 +176,34 @@ def _get_default_mtls_endpoint(api_endpoint): _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" _DEFAULT_UNIVERSE = "googleapis.com" + @staticmethod + def _use_client_cert_effective(): + """Returns whether client certificate should be used for 
mTLS if the + google-auth version supports should_use_client_cert automatic mTLS enablement. + + Alternatively, read from the GOOGLE_API_USE_CLIENT_CERTIFICATE env var. + + Returns: + bool: whether client certificate should be used for mTLS + Raises: + ValueError: (If using a version of google-auth without should_use_client_cert and + GOOGLE_API_USE_CLIENT_CERTIFICATE is set to an unexpected value.) + """ + # check if google-auth version supports should_use_client_cert for automatic mTLS enablement + if hasattr(mtls, "should_use_client_cert"): # pragma: NO COVER + return mtls.should_use_client_cert() + else: # pragma: NO COVER + # if unsupported, fallback to reading from env var + use_client_cert_str = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert_str not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be" + " either `true` or `false`" + ) + return use_client_cert_str == "true" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -217,6 +251,28 @@ def transport(self) -> PredictionServiceTransport: """ return self._transport + @staticmethod + def cached_content_path( + project: str, + location: str, + cached_content: str, + ) -> str: + """Returns a fully-qualified cached_content string.""" + return "projects/{project}/locations/{location}/cachedContents/{cached_content}".format( + project=project, + location=location, + cached_content=cached_content, + ) + + @staticmethod + def parse_cached_content_path(path: str) -> Dict[str, str]: + """Parses a cached_content path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/cachedContents/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def endpoint_path( project: str, @@ -283,6 +339,28 @@ def parse_rag_corpus_path(path: str) -> Dict[str, str]: ) return 
m.groupdict() if m else {} + @staticmethod + def template_path( + project: str, + location: str, + template: str, + ) -> str: + """Returns a fully-qualified template string.""" + return "projects/{project}/locations/{location}/templates/{template}".format( + project=project, + location=location, + template=template, + ) + + @staticmethod + def parse_template_path(path: str) -> Dict[str, str]: + """Parses a template path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/templates/(?P