From 983f61624019da7876ce67d495e26e928a6b5224 Mon Sep 17 00:00:00 2001
From: Yoshi Automation
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
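The jobSpec fields documented above (network, reservedIpRanges, protectedArtifactLocationId) are plain keys in the request body passed to customJobs.create. The sketch below is illustrative only and is not part of this change: the project number, network name, container image, and IP-range name are placeholders, and it assumes Application Default Credentials are available to the discovery-based client.

```python
# Minimal sketch: creating a CustomJob whose spec uses the network-peering and
# reserved-IP-range fields described above. All resource names are placeholders.
from googleapiclient import discovery

aiplatform = discovery.build("aiplatform", "v1")

parent = "projects/12345/locations/us-central1"
body = {
    "displayName": "example-custom-job",
    "jobSpec": {
        "workerPoolSpecs": [
            {
                "machineSpec": {"machineType": "n1-standard-4"},
                "replicaCount": "1",
                "containerSpec": {"imageUri": "gcr.io/my-project/trainer:latest"},
            }
        ],
        # Peer the job with a VPC that already has Vertex AI peering configured.
        "network": "projects/12345/global/networks/myVPC",
        # Constrain the job to a named reserved IP range under that network.
        "reservedIpRanges": ["vertex-ai-ip-range"],
        # Set only when protected artifacts live in a different location.
        "protectedArtifactLocationId": "us-central1",
    },
}

request = aiplatform.projects().locations().customJobs().create(parent=parent, body=body)
response = request.execute()
print(response["name"])
```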
@@ -274,7 +274,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -426,7 +426,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -549,7 +549,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
diff --git a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html
index 8c742cdf1dc..c66ee00d780 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html
@@ -240,6 +240,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
"trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials.
"baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/`
@@ -250,7 +265,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -470,6 +485,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
"trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials.
"baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/`
@@ -480,7 +510,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -742,6 +772,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
"trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials.
"baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/`
@@ -752,7 +797,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -985,6 +1030,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
"trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials.
"baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/`
@@ -995,7 +1055,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
diff --git a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html
index 83ded9ac50c..c66feb0e0f8 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html
@@ -220,7 +220,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -298,7 +298,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -462,7 +462,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -540,7 +540,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -746,7 +746,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -824,7 +824,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -1001,7 +1001,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -1079,7 +1079,7 @@ Method Details
"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
diff --git a/docs/dyn/aiplatform_v1.projects.locations.studies.html b/docs/dyn/aiplatform_v1.projects.locations.studies.html
index 0882f98a7e4..10bb6a2ebf0 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.studies.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.studies.html
@@ -120,7 +120,7 @@ Method Details
body: object, The request body.
The object takes the form of:
-{ # A message representing a Study.
+{ # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -202,6 +202,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
}
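For the Vizier API, the same studyStoppingConfig is carried inside studySpec of the Study request body shown above. A minimal sketch, assuming placeholder resource names and illustrative budget values; the get call simply reads back what create stored.

```python
# Hedged example only: create a Vizier Study with automated stopping, then read it back.
from googleapiclient import discovery

aiplatform = discovery.build("aiplatform", "v1")
parent = "projects/12345/locations/us-central1"

study_body = {
    "displayName": "example-study",
    "studySpec": {
        "metrics": [{"metricId": "loss", "goal": "MINIMIZE"}],
        "parameters": [
            {
                "parameterId": "batch_size",
                "discreteValueSpec": {"values": [32, 64, 128]},
            }
        ],
        "studyStoppingConfig": {
            # Do not stop before 3 trials have COMPLETED.
            "minNumTrials": 3,
            # Stop after 2 hours without objective improvement (single-objective only).
            "maxDurationNoProgress": "7200s",
            # Hard wallclock budget of 24 hours since Study creation.
            "maximumRuntimeConstraint": {"maxDuration": "86400s"},
        },
    },
}

study = aiplatform.projects().locations().studies().create(parent=parent, body=study_body).execute()
stored = aiplatform.projects().locations().studies().get(name=study["name"]).execute()
print(stored["studySpec"].get("studyStoppingConfig"))
```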
@@ -213,7 +228,7 @@ Method Details
Returns:
An object of the form:
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -295,6 +310,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
}
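The endTime and maxDuration strings in the runtime constraints above appear to follow the usual JSON encodings for protobuf Timestamp and Duration values, i.e. an RFC 3339 UTC timestamp and a seconds string. Under that assumption, a small helper for building either constraint could look like this (the function name and defaults are illustrative, not part of the API):

    from datetime import datetime, timedelta, timezone

    def runtime_constraint(end_in=None, max_duration=None):
        """Build a dict usable as maximumRuntimeConstraint or minimumRuntimeConstraint."""
        constraint = {}
        if end_in is not None:
            # endTime: RFC 3339 timestamp in UTC, e.g. "2024-05-01T12:00:00Z"
            end_time = datetime.now(timezone.utc) + end_in
            constraint["endTime"] = end_time.strftime("%Y-%m-%dT%H:%M:%SZ")
        if max_duration is not None:
            # maxDuration: duration encoded as a seconds string, e.g. "3600s"
            constraint["maxDuration"] = f"{int(max_duration.total_seconds())}s"
        return constraint

    # Example: allow at most 6 hours from now, or 4 hours of wallclock time since Study creation.
    print(runtime_constraint(end_in=timedelta(hours=6), max_duration=timedelta(hours=4)))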
@@ -331,7 +361,7 @@ Method Details
Returns:
An object of the form:
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -413,6 +443,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
}
@@ -436,7 +481,7 @@ Method Details
{ # Response message for VizierService.ListStudies.
"nextPageToken": "A String", # Passes this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages.
"studies": [ # The studies associated with the project.
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -518,6 +563,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
},
],
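Because the ListStudies response above is paginated via nextPageToken, callers generally loop until the token is absent. A minimal sketch under the same assumptions as the earlier snippets (discovery-based client, placeholder parent):

    from googleapiclient import discovery

    aiplatform = discovery.build("aiplatform", "v1beta1")

    def iter_studies(parent):
        """Yield every Study under `parent`, following nextPageToken across pages."""
        page_token = None
        while True:
            response = aiplatform.projects().locations().studies().list(
                parent=parent, pageToken=page_token
            ).execute()
            for study in response.get("studies", []):
                yield study
            page_token = response.get("nextPageToken")
            if not page_token:
                break

    for study in iter_studies("projects/my-project/locations/us-central1"):
        print(study["displayName"], study.get("state"))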
@@ -559,7 +619,7 @@ Method Details
Returns:
An object of the form:
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -641,6 +701,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
},
}
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html
index 9260d3ba929..e9d288d8626 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html
@@ -165,7 +165,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -276,7 +276,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -429,7 +429,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -553,7 +553,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
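For the CustomJob fields that recur throughout this file (the peered VPC network, reservedIpRanges, and protectedArtifactLocationId), a create call might be assembled as below. This is a sketch only: all resource names are placeholders and jobSpec is reduced to the networking-related fields, whereas a runnable job also needs workerPoolSpecs.

    from googleapiclient import discovery

    aiplatform = discovery.build("aiplatform", "v1beta1")

    custom_job_body = {
        "displayName": "example-custom-job",
        "jobSpec": {
            # workerPoolSpecs omitted for brevity
            "network": "projects/12345/global/networks/myVPC",  # requires VPC Network Peering to be set up
            "reservedIpRanges": ["vertex-ai-ip-range"],          # optional: pin the job to reserved ranges
            "protectedArtifactLocationId": "us-central1",        # only when it differs from the job location
        },
    }

    custom_job = aiplatform.projects().locations().customJobs().create(
        parent="projects/my-project/locations/us-central1",
        body=custom_job_body,
    ).execute()
    print(custom_job["name"])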
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html
index 8075f4874af..ee859e575d5 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html
@@ -247,6 +247,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -264,7 +279,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
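Since the same studyStoppingConfig block now also appears under the HyperparameterTuningJob studySpec, a tuning job can carry its own stopping conditions next to the usual trial budgets. A hedged sketch (placeholder names; metrics, parameters, and worker pools elided):

    from googleapiclient import discovery

    aiplatform = discovery.build("aiplatform", "v1beta1")

    hpt_body = {
        "displayName": "example-hpt-job",
        "maxTrialCount": 50,
        "parallelTrialCount": 5,
        "studySpec": {
            # metrics, parameters, and algorithm omitted for brevity
            "studyStoppingConfig": {
                "maxDurationNoProgress": "7200s",  # stop after 2h without objective improvement (single-objective only)
                "shouldStopAsap": True,
            },
        },
        "trialJobSpec": {
            # workerPoolSpecs omitted for brevity
            "persistentResourceId": "my-persistent-resource",  # placeholder: reuse machines held by a PersistentResource
        },
    }

    tuning_job = aiplatform.projects().locations().hyperparameterTuningJobs().create(
        parent="projects/my-project/locations/us-central1",
        body=hpt_body,
    ).execute()
    print(tuning_job["name"])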
@@ -491,6 +506,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -508,7 +538,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -777,6 +807,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -794,7 +839,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -1034,6 +1079,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -1051,7 +1111,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html
index 23aa38fcc74..79c50f7623a 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html
@@ -221,7 +221,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -300,7 +300,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -465,7 +465,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -544,7 +544,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -751,7 +751,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -830,7 +830,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -1008,7 +1008,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
@@ -1087,7 +1087,7 @@ Method Details
"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
"network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
"persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected.
- "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+ "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
"reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
"A String",
],
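
Before moving on to the Study changes, a brief illustration of where these CustomJob spec fields live may help. This is a hedged sketch only: the project, region, container image, and resource IDs below are placeholders, and it assumes the generated `aiplatform` v1 client built via `googleapiclient.discovery` with Application Default Credentials.

```python
# Hypothetical sketch: create a CustomJob whose spec sets the fields discussed
# above (persistentResourceId, protectedArtifactLocationId, reservedIpRanges).
# Project, location, image URI, and resource IDs are placeholders.
from googleapiclient import discovery

aiplatform = discovery.build("aiplatform", "v1")

parent = "projects/my-project/locations/us-central1"
body = {
    "displayName": "example-custom-job",
    "jobSpec": {
        "workerPoolSpecs": [
            {
                "machineSpec": {"machineType": "n1-standard-4"},
                "replicaCount": "1",
                "containerSpec": {"imageUri": "gcr.io/my-project/trainer:latest"},
            }
        ],
        # Run on an existing PersistentResource instead of on-demand machines.
        "persistentResourceId": "my-persistent-resource",
        # Location for protected artifacts (only when it differs from the job location).
        "protectedArtifactLocationId": "us-central1",
        "reservedIpRanges": ["vertex-ai-ip-range"],
    },
}

job = aiplatform.projects().locations().customJobs().create(parent=parent, body=body).execute()
print(job["name"])
```

Per the descriptions above, `protectedArtifactLocationId` only needs to be populated when the artifact location differs from the CustomJob location.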
diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html b/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html
index 5d16f7d2192..806c85e918c 100644
--- a/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html
+++ b/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html
@@ -120,7 +120,7 @@ Method Details
body: object, The request body.
The object takes the form of:
-{ # A message representing a Study.
+{ # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -209,6 +209,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+      "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -226,7 +241,7 @@ Method Details
Returns:
An object of the form:
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -315,6 +330,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+      "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -357,7 +387,7 @@ Method Details
Returns:
An object of the form:
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -446,6 +476,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+      "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -475,7 +520,7 @@ Method Details
{ # Response message for VizierService.ListStudies.
"nextPageToken": "A String", # Passes this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages.
"studies": [ # The studies associated with the project.
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -564,6 +609,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+      "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
@@ -611,7 +671,7 @@ Method Details
Returns:
An object of the form:
- { # A message representing a Study.
+ { # A message representing a Study. Next id: 12
"createTime": "A String", # Output only. Time at which the study was created.
"displayName": "A String", # Required. Describes the Study, default value is empty string.
"inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED.
@@ -700,6 +760,21 @@ Method Details
"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters.
},
],
+ "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition.
+ "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.
+ "maxNumTrials": 42, # If there are more than this many trials, stop the study.
+ "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.
+ "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+ "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study.
+ "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study.
+ "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone.
+ "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study.
+ },
+      "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).
+ },
"transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob
"disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning.
"priorStudyNames": [ # Output only. Names of previously completed studies
diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json
index 3f6d6d942bd..1f04bc4239b 100644
--- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json
+++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json
@@ -12998,7 +12998,7 @@
}
}
},
- "revision": "20231012",
+ "revision": "20231023",
"rootUrl": "https://aiplatform.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -14711,7 +14711,7 @@
"type": "string"
},
"protectedArtifactLocationId": {
- "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations",
+ "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations",
"type": "string"
},
"reservedIpRanges": {
@@ -26815,7 +26815,7 @@
"type": "object"
},
"GoogleCloudAiplatformV1Study": {
- "description": "A message representing a Study.",
+ "description": "A message representing a Study. Next id: 12",
"id": "GoogleCloudAiplatformV1Study",
"properties": {
"createTime": {
@@ -26933,6 +26933,10 @@
"$ref": "GoogleCloudAiplatformV1StudySpecParameterSpec"
},
"type": "array"
+ },
+ "studyStoppingConfig": {
+ "$ref": "GoogleCloudAiplatformV1StudySpecStudyStoppingConfig",
+ "description": "Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition."
}
},
"type": "object"
@@ -27238,6 +27242,62 @@
},
"type": "object"
},
+ "GoogleCloudAiplatformV1StudySpecStudyStoppingConfig": {
+ "description": "The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection.",
+ "id": "GoogleCloudAiplatformV1StudySpecStudyStoppingConfig",
+ "properties": {
+ "maxDurationNoProgress": {
+ "description": "If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.",
+ "format": "google-duration",
+ "type": "string"
+ },
+ "maxNumTrials": {
+ "description": "If there are more than this many trials, stop the study.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "maxNumTrialsNoProgress": {
+ "description": "If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "maximumRuntimeConstraint": {
+ "$ref": "GoogleCloudAiplatformV1StudyTimeConstraint",
+ "description": "If the specified time or duration has passed, stop the study."
+ },
+ "minNumTrials": {
+ "description": "If there are fewer than this many COMPLETED trials, do not stop the study.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "minimumRuntimeConstraint": {
+ "$ref": "GoogleCloudAiplatformV1StudyTimeConstraint",
+ "description": "Each \"stopping rule\" in this proto specifies an \"if\" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose \"if\" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study."
+ },
+        "shouldStopAsap": {
+          "description": "If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+ },
+ "GoogleCloudAiplatformV1StudyTimeConstraint": {
+ "description": "Time-based Constraint for Study",
+ "id": "GoogleCloudAiplatformV1StudyTimeConstraint",
+ "properties": {
+ "endTime": {
+ "description": "Compares the wallclock time to this time. Must use UTC timezone.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "maxDuration": {
+ "description": "Counts the wallclock time passed since the creation of this Study.",
+ "format": "google-duration",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudAiplatformV1SuggestTrialsMetadata": {
"description": "Details of operations that perform Trials suggestion.",
"id": "GoogleCloudAiplatformV1SuggestTrialsMetadata",
diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json
index 5daae0c9a32..c3353fa5998 100644
--- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json
@@ -16094,7 +16094,7 @@
}
}
},
- "revision": "20231012",
+ "revision": "20231023",
"rootUrl": "https://aiplatform.googleapis.com/",
"schemas": {
"GoogleApiHttpBody": {
@@ -17987,7 +17987,7 @@
"type": "string"
},
"protectedArtifactLocationId": {
- "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations",
+ "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations",
"type": "string"
},
"reservedIpRanges": {
@@ -31302,7 +31302,7 @@
"type": "object"
},
"GoogleCloudAiplatformV1beta1Study": {
- "description": "A message representing a Study.",
+ "description": "A message representing a Study. Next id: 12",
"id": "GoogleCloudAiplatformV1beta1Study",
"properties": {
"createTime": {
@@ -31426,6 +31426,10 @@
},
"type": "array"
},
+ "studyStoppingConfig": {
+ "$ref": "GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig",
+ "description": "Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition."
+ },
"transferLearningConfig": {
"$ref": "GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig",
"description": "The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob"
@@ -31765,6 +31769,45 @@
},
"type": "object"
},
+ "GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig": {
+ "description": "The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection.",
+ "id": "GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig",
+ "properties": {
+ "maxDurationNoProgress": {
+ "description": "If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.",
+ "format": "google-duration",
+ "type": "string"
+ },
+ "maxNumTrials": {
+ "description": "If there are more than this many trials, stop the study.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "maxNumTrialsNoProgress": {
+ "description": "If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "maximumRuntimeConstraint": {
+ "$ref": "GoogleCloudAiplatformV1beta1StudyTimeConstraint",
+ "description": "If the specified time or duration has passed, stop the study."
+ },
+ "minNumTrials": {
+ "description": "If there are fewer than this many COMPLETED trials, do not stop the study.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "minimumRuntimeConstraint": {
+ "$ref": "GoogleCloudAiplatformV1beta1StudyTimeConstraint",
+ "description": "Each \"stopping rule\" in this proto specifies an \"if\" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose \"if\" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study."
+ },
+        "shouldStopAsap": {
+          "description": "If true, a Study enters STOPPING_ASAP whenever it would normally enter the STOPPING state. The bottom line is: set to true if you want to interrupt ongoing evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).",
+ "type": "boolean"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig": {
"description": "This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here.",
"id": "GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig",
@@ -31784,6 +31827,23 @@
},
"type": "object"
},
+ "GoogleCloudAiplatformV1beta1StudyTimeConstraint": {
+ "description": "Time-based Constraint for Study",
+ "id": "GoogleCloudAiplatformV1beta1StudyTimeConstraint",
+ "properties": {
+ "endTime": {
+ "description": "Compares the wallclock time to this time. Must use UTC timezone.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "maxDuration": {
+ "description": "Counts the wallclock time passed since the creation of this Study.",
+ "format": "google-duration",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
"GoogleCloudAiplatformV1beta1SuggestTrialsMetadata": {
"description": "Details of operations that perform Trials suggestion.",
"id": "GoogleCloudAiplatformV1beta1SuggestTrialsMetadata",
From 5779a441640f840f866736514a66718a5ed07e80 Mon Sep 17 00:00:00 2001
From: Yoshi Automation

Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -1204,6 +1208,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
diff --git a/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html b/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html
index 4bf550b5f19..e34d131b8e1 100644
--- a/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html
+++ b/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html
@@ -114,6 +114,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -131,6 +135,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -173,6 +181,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -201,6 +213,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -237,6 +253,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -255,6 +275,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
diff --git a/docs/dyn/analyticsadmin_v1beta.accounts.html b/docs/dyn/analyticsadmin_v1beta.accounts.html
index 4757aa39c1c..ebff525d555 100644
--- a/docs/dyn/analyticsadmin_v1beta.accounts.html
+++ b/docs/dyn/analyticsadmin_v1beta.accounts.html
@@ -543,6 +543,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -620,6 +624,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
diff --git a/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html b/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html
index 07a7093cf8e..95a9689dce9 100644
--- a/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html
+++ b/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html
@@ -114,6 +114,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -131,6 +135,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -173,6 +181,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -201,6 +213,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -237,6 +253,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
@@ -255,6 +275,10 @@ Method Details
"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`.
"createTime": "A String", # Output only. Time when this conversion event was created in the property.
"custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property.
+ "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event.
+ "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.
+ "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset.
+ },
"deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent.
"eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase'
"name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event}
diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
index bd6abaf02d2..bb96032ddc3 100644
--- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json
@@ -4298,7 +4298,7 @@
}
}
},
- "revision": "20231022",
+ "revision": "20231027",
"rootUrl": "https://analyticsadmin.googleapis.com/",
"schemas": {
"GoogleAnalyticsAdminV1alphaAccessBetweenFilter": {
@@ -5822,6 +5822,10 @@
"readOnly": true,
"type": "boolean"
},
+ "defaultConversionValue": {
+ "$ref": "GoogleAnalyticsAdminV1alphaConversionEventDefaultConversionValue",
+ "description": "Optional. Defines a default value/currency for a conversion event."
+ },
"deletable": {
"description": "Output only. If set, this event can currently be deleted with DeleteConversionEvent.",
"readOnly": true,
@@ -5839,6 +5843,22 @@
},
"type": "object"
},
+ "GoogleAnalyticsAdminV1alphaConversionEventDefaultConversionValue": {
+ "description": "Defines a default value/currency for a conversion event. Both value and currency must be provided.",
+ "id": "GoogleAnalyticsAdminV1alphaConversionEventDefaultConversionValue",
+ "properties": {
+ "currencyCode": {
+ "description": "When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.",
+ "type": "string"
+ },
+ "value": {
+ "description": "This value will be used to populate the value for all conversions of the specified event_name where the event \"value\" parameter is unset.",
+ "format": "double",
+ "type": "number"
+ }
+ },
+ "type": "object"
+ },
"GoogleAnalyticsAdminV1alphaConversionValues": {
"description": "Conversion value settings for a postback window for SKAdNetwork conversion value schema.",
"id": "GoogleAnalyticsAdminV1alphaConversionValues",
diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json
index ec017476a58..1da19f4921e 100644
--- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json
@@ -1628,7 +1628,7 @@
}
}
},
- "revision": "20231022",
+ "revision": "20231027",
"rootUrl": "https://analyticsadmin.googleapis.com/",
"schemas": {
"GoogleAnalyticsAdminV1betaAccessBetweenFilter": {
@@ -2241,6 +2241,10 @@
"readOnly": true,
"type": "boolean"
},
+ "defaultConversionValue": {
+ "$ref": "GoogleAnalyticsAdminV1betaConversionEventDefaultConversionValue",
+ "description": "Optional. Defines a default value/currency for a conversion event."
+ },
"deletable": {
"description": "Output only. If set, this event can currently be deleted with DeleteConversionEvent.",
"readOnly": true,
@@ -2258,6 +2262,22 @@
},
"type": "object"
},
+ "GoogleAnalyticsAdminV1betaConversionEventDefaultConversionValue": {
+ "description": "Defines a default value/currency for a conversion event. Both value and currency must be provided.",
+ "id": "GoogleAnalyticsAdminV1betaConversionEventDefaultConversionValue",
+ "properties": {
+ "currencyCode": {
+ "description": "When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more.",
+ "type": "string"
+ },
+ "value": {
+ "description": "This value will be used to populate the value for all conversions of the specified event_name where the event \"value\" parameter is unset.",
+ "format": "double",
+ "type": "number"
+ }
+ },
+ "type": "object"
+ },
"GoogleAnalyticsAdminV1betaCustomDimension": {
"description": "A definition for a CustomDimension.",
"id": "GoogleAnalyticsAdminV1betaCustomDimension",
From 160c287e0dd64e2576f787b66fd6b9d7cf7e155e Mon Sep 17 00:00:00 2001
From: Yoshi Automation Method Details
The object takes the form of:
{ # Request message to claim a device on behalf of a customer.
+ "configurationId": "A String", # Optional. The unique identifier of the configuration (internally known as profile) to set for the section.
"customerId": "A String", # The ID of the customer for whom the device is being claimed.
"deviceIdentifier": { # Encapsulates hardware and product IDs to identify a manufactured device. To understand requirements on identifier sets, read [Identifiers](https://developers.google.com/zero-touch/guides/identifiers). # Required. Required. The device identifier of the device to claim.
"chromeOsAttestedDeviceId": "A String", # An identifier provided by OEMs, carried through the production and sales process. Only applicable to Chrome OS devices.
@@ -142,7 +143,7 @@ Method Details
"googleWorkspaceCustomerId": "A String", # The Google Workspace customer ID.
"preProvisioningToken": "A String", # Optional. Must and can only be set for Chrome OS devices.
"sectionType": "A String", # Required. The section type of the device's provisioning record.
- "simlockProfileId": "A String", # Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile.
+ "simlockProfileId": "A String", # Optional.
}
x__xgafv: string, V1 error format.
@@ -171,6 +172,7 @@ Method Details
{ # Request to claim devices asynchronously in batch. Claiming a device adds the device to zero-touch enrollment and shows the device in the customer's view of the portal.
"claims": [ # Required. A list of device claims.
{ # Identifies one claim request.
+ "configurationId": "A String", # Optional. The unique identifier of the configuration (internally known as profile) to set for the section.
"customerId": "A String", # The ID of the customer for whom the device is being claimed.
"deviceIdentifier": { # Encapsulates hardware and product IDs to identify a manufactured device. To understand requirements on identifier sets, read [Identifiers](https://developers.google.com/zero-touch/guides/identifiers). # Required. Required. Device identifier of the device.
"chromeOsAttestedDeviceId": "A String", # An identifier provided by OEMs, carried through the production and sales process. Only applicable to Chrome OS devices.
@@ -189,7 +191,7 @@ Method Details
"googleWorkspaceCustomerId": "A String", # The Google Workspace customer ID.
"preProvisioningToken": "A String", # Optional. Must and can only be set for Chrome OS devices.
"sectionType": "A String", # Required. The section type of the device's provisioning record.
- "simlockProfileId": "A String", # Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile.
+ "simlockProfileId": "A String", # Optional.
},
],
}
diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
index 095c368862c..01d69175f42 100644
--- a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
+++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json
@@ -851,13 +851,18 @@
}
}
},
- "revision": "20231021",
+ "revision": "20231029",
"rootUrl": "https://androiddeviceprovisioning.googleapis.com/",
"schemas": {
"ClaimDeviceRequest": {
"description": "Request message to claim a device on behalf of a customer.",
"id": "ClaimDeviceRequest",
"properties": {
+ "configurationId": {
+ "description": "Optional. The unique identifier of the configuration (internally known as profile) to set for the section.",
+ "format": "int64",
+ "type": "string"
+ },
"customerId": {
"description": "The ID of the customer for whom the device is being claimed.",
"format": "int64",
@@ -894,7 +899,7 @@
"type": "string"
},
"simlockProfileId": {
- "description": "Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile.",
+ "description": "Optional. ",
"format": "int64",
"type": "string"
}
@@ -1709,6 +1714,11 @@
"description": "Identifies one claim request.",
"id": "PartnerClaim",
"properties": {
+ "configurationId": {
+ "description": "Optional. The unique identifier of the configuration (internally known as profile) to set for the section.",
+ "format": "int64",
+ "type": "string"
+ },
"customerId": {
"description": "The ID of the customer for whom the device is being claimed.",
"format": "int64",
@@ -1745,7 +1755,7 @@
"type": "string"
},
"simlockProfileId": {
- "description": "Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile.",
+ "description": "Optional. ",
"format": "int64",
"type": "string"
}
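For reference, a minimal sketch of passing the new configurationId field in a device claim follows; the partner, customer, configuration, and IMEI values are illustrative placeholders.

    # Sketch: claim a device and attach a configuration (profile) in one call.
    # All IDs below are placeholder assumptions.
    from googleapiclient.discovery import build

    provisioning = build("androiddeviceprovisioning", "v1")

    request_body = {
        "customerId": "1234567",
        "deviceIdentifier": {"imei": "123456789012347"},
        "sectionType": "SECTION_TYPE_ZERO_TOUCH",
        # New optional field: the configuration to set for the section.
        "configurationId": "8888888",
    }

    result = (
        provisioning.partners()
        .devices()
        .claim(partnerId="9876543", body=request_body)
        .execute()
    )
    print(result)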
From d6c3c723660b03319bae6ab0573b83d9f340cc07 Mon Sep 17 00:00:00 2001
From: Yoshi Automation Method Details
"operatingSystem": "A String", # The operating system of the application runtime.
"runtimeVersion": "A String", # The runtime version of an App Engine flexible application.
},
+ "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
"handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.
{ # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript.
"apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests.
@@ -518,6 +521,9 @@ Method Details
"operatingSystem": "A String", # The operating system of the application runtime.
"runtimeVersion": "A String", # The runtime version of an App Engine flexible application.
},
+ "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
"handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.
{ # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript.
"apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests.
@@ -756,6 +762,9 @@ Method Details
"operatingSystem": "A String", # The operating system of the application runtime.
"runtimeVersion": "A String", # The runtime version of an App Engine flexible application.
},
+ "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
"handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.
{ # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript.
"apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests.
@@ -997,6 +1006,9 @@ Method Details
"operatingSystem": "A String", # The operating system of the application runtime.
"runtimeVersion": "A String", # The runtime version of an App Engine flexible application.
},
+ "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
"handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.
{ # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript.
"apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests.
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json
index 9cc272c0540..5eda718c29c 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1.json
@@ -1610,7 +1610,7 @@
}
}
},
- "revision": "20231016",
+ "revision": "20231024",
"rootUrl": "https://appengine.googleapis.com/",
"schemas": {
"ApiConfigHandler": {
@@ -3829,6 +3829,14 @@
"$ref": "FlexibleRuntimeSettings",
"description": "Settings for App Engine flexible runtimes."
},
+ "generatedCustomerMetadata": {
+ "additionalProperties": {
+ "description": "Properties of the object. Contains field @type with type URL.",
+ "type": "any"
+ },
+ "description": "Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest",
+ "type": "object"
+ },
"handlers": {
"description": "An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.",
"items": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
index ec0ed8d605d..c8cf7320f25 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json
@@ -887,7 +887,7 @@
}
}
},
- "revision": "20231016",
+ "revision": "20231030",
"rootUrl": "https://appengine.googleapis.com/",
"schemas": {
"AuthorizedCertificate": {
diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json
index 7efc4fecd6a..b3ce8df8037 100644
--- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json
+++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json
@@ -1859,7 +1859,7 @@
}
}
},
- "revision": "20231016",
+ "revision": "20231024",
"rootUrl": "https://appengine.googleapis.com/",
"schemas": {
"ApiConfigHandler": {
From 2d0f2c5b561c1844be7f382155ec232a214c1f5c Mon Sep 17 00:00:00 2001
From: Yoshi Automation Method Details
"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -324,12 +324,12 @@ Method Details
"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -403,12 +403,12 @@ Method Details
"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -505,12 +505,12 @@ Method Details
"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -573,12 +573,12 @@ Method Details
"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -640,12 +640,12 @@ Method Details
"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
diff --git a/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html b/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html
index 5ff267199ba..dcfa8580cc0 100644
--- a/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html
+++ b/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html
@@ -211,12 +211,12 @@ Method Details
"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -376,12 +376,12 @@ Method Details
"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -481,12 +481,12 @@ Method Details
"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -591,12 +591,12 @@ Method Details
"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
@@ -684,12 +684,12 @@ Method Details
"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged.
"activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged.
},
- "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment."
+ "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.
"A String",
],
"createTime": "A String", # Output only. Immutable. The Workload creation timestamp.
"displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload
- "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload.
+ "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload.
"ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any.
"ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails
"ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload.
diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json
index 0af2b3c3b1d..e5c72c439fa 100644
--- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json
+++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json
@@ -566,7 +566,7 @@
}
}
},
- "revision": "20231017",
+ "revision": "20231023",
"rootUrl": "https://assuredworkloads.googleapis.com/",
"schemas": {
"GoogleCloudAssuredworkloadsV1AcknowledgeViolationRequest": {
@@ -1121,7 +1121,7 @@
"readOnly": true
},
"compliantButDisallowedServices": {
- "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.\"",
+ "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.",
"items": {
"type": "string"
},
@@ -1140,7 +1140,8 @@
},
"ekmProvisioningResponse": {
"$ref": "GoogleCloudAssuredworkloadsV1WorkloadEkmProvisioningResponse",
- "description": "Optional. Represents the Ekm Provisioning State of the given workload."
+ "description": "Output only. Represents the Ekm Provisioning State of the given workload.",
+ "readOnly": true
},
"enableSovereignControls": {
"description": "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.",
diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json
index 339913c7791..f71b1e8e966 100644
--- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json
+++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json
@@ -563,7 +563,7 @@
}
}
},
- "revision": "20231017",
+ "revision": "20231023",
"rootUrl": "https://assuredworkloads.googleapis.com/",
"schemas": {
"GoogleCloudAssuredworkloadsV1beta1AcknowledgeViolationRequest": {
@@ -1167,7 +1167,7 @@
"readOnly": true
},
"compliantButDisallowedServices": {
- "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.\"",
+ "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.",
"items": {
"type": "string"
},
@@ -1186,7 +1186,8 @@
},
"ekmProvisioningResponse": {
"$ref": "GoogleCloudAssuredworkloadsV1beta1WorkloadEkmProvisioningResponse",
- "description": "Optional. Represents the Ekm Provisioning State of the given workload."
+ "description": "Output only. Represents the Ekm Provisioning State of the given workload.",
+ "readOnly": true
},
"enableSovereignControls": {
"description": "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.",
From 38e594da9323540fa904a25d385c76b4fc6edb2c Mon Sep 17 00:00:00 2001
From: Yoshi Automation Method Details
"taskSource": "A String", # TaskSource represents the source of the task.
},
],
+ "useBatchMonitoredResource": True or False, # If true, the cloud logging for batch agent will use batch.googleapis.com/Job as monitored resource for Batch job related logging.
}
diff --git a/googleapiclient/discovery_cache/documents/batch.v1.json b/googleapiclient/discovery_cache/documents/batch.v1.json
index 0de4d032afb..3ab620498f5 100644
--- a/googleapiclient/discovery_cache/documents/batch.v1.json
+++ b/googleapiclient/discovery_cache/documents/batch.v1.json
@@ -561,7 +561,7 @@
}
}
},
- "revision": "20231009",
+ "revision": "20231018",
"rootUrl": "https://batch.googleapis.com/",
"schemas": {
"Accelerator": {
@@ -1835,6 +1835,10 @@
"$ref": "AgentTask"
},
"type": "array"
+ },
+ "useBatchMonitoredResource": {
+ "description": "If true, the cloud logging for batch agent will use batch.googleapis.com/Job as monitored resource for Batch job related logging.",
+ "type": "boolean"
}
},
"type": "object"
From d536b311d73aeac6ae0af89f3bcc22a204ce53f9 Mon Sep 17 00:00:00 2001
From: Yoshi Automation Instance Methods
Returns the projects Resource.
+
+ subAccounts()
+
Returns the subAccounts Resource.
+Close httplib2 connections.
- create(body=None, x__xgafv=None)
+ create(body=None, parent=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
@@ -92,11 +97,14 @@ getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)
Gets the access control policy for a billing account. The caller must have the `billing.accounts.getIamPolicy` permission on the account, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).
- list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+ list(filter=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
Retrieves the next page of results.
+
+ move(name, body=None, x__xgafv=None)
+Changes which parent organization a billing account belongs to.
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.
@@ -113,7 +121,7 @@create(body=None, x__xgafv=None)
+ create(body=None, parent=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts. Args: @@ -127,6 +135,7 @@Method Details
"open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. } + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -214,13 +223,14 @@Method Details
list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+ list(filter=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access). Args: filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported. pageSize: integer, Requested page size. The maximum page size is 100; this is also the default. pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned. + parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -256,6 +266,35 @@Method Details
move(name, body=None, x__xgafv=None)
+  Changes which parent organization a billing account belongs to.
+
+Args:
+  name: string, Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for `MoveBillingAccount` RPC.
+  "destinationParent": "A String", # Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
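As an orientation aid (not part of the generated page), here is a minimal sketch of calling the new `billingAccounts.move` method through this client library; it assumes Application Default Credentials are configured, and the account and organization IDs below are placeholders:

    from googleapiclient.discovery import build

    # Build the Cloud Billing service object; credentials are resolved via
    # Application Default Credentials (e.g. `gcloud auth application-default login`).
    billing = build("cloudbilling", "v1")

    # Reparent a (placeholder) billing account under a (placeholder) organization.
    moved = billing.billingAccounts().move(
        name="billingAccounts/012345-567890-ABCDEF",
        body={"destinationParent": "organizations/12345678"},
    ).execute()
    print(moved["name"], moved["displayName"])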
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.
diff --git a/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html
new file mode 100644
index 00000000000..b71c2d190ee
--- /dev/null
+++ b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html
@@ -0,0 +1,171 @@
+Cloud Billing API . billingAccounts . subAccounts
+Instance Methods
++
+close()
Close httplib2 connections.
++
+create(parent, body=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
++
+list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
++
+list_next()
+Retrieves the next page of results.
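As an orientation aid (not part of the generated page), here is a minimal sketch of paging through subaccounts with `list` and `list_next`; Application Default Credentials are assumed and the parent billing account ID is a placeholder:

    from googleapiclient.discovery import build

    billing = build("cloudbilling", "v1")
    sub_accounts = billing.billingAccounts().subAccounts()

    # Page through every subaccount of the (placeholder) parent billing account.
    request = sub_accounts.list(parent="billingAccounts/012345-567890-ABCDEF")
    while request is not None:
        response = request.execute()
        for account in response.get("billingAccounts", []):
            print(account["name"], account.get("displayName", ""))
        request = sub_accounts.list_next(previous_request=request,
                                         previous_response=response)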
+Method Details
+close()
+Close httplib2 connections.
+create(parent, body=None, x__xgafv=None)
+This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts. + +Args: + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required) + body: object, The request body. + The object takes the form of: + +{ # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+++ +list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
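A minimal sketch of creating a subaccount with the method documented above (not part of the generated page); it assumes Application Default Credentials and a placeholder, subaccount-enabled parent billing account:

    from googleapiclient.discovery import build

    billing = build("cloudbilling", "v1")

    # Create a subaccount under the (placeholder) parent billing account.
    # displayName is the field to set here; name and open are output only.
    subaccount = billing.billingAccounts().subAccounts().create(
        parent="billingAccounts/012345-567890-ABCDEF",
        body={"displayName": "Example subaccount"},
    ).execute()
    print(subaccount["name"])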
+Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access). + +Args: + parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required) + filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported. + pageSize: integer, Requested page size. The maximum page size is 100; this is also the default. + pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for `ListBillingAccounts`. + "billingAccounts": [ # A list of billing accounts. + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. + }, + ], + "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. +}+++ + \ No newline at end of file diff --git a/docs/dyn/cloudbilling_v1.html b/docs/dyn/cloudbilling_v1.html index 1d4da4cfcfe..a8b7320e249 100644 --- a/docs/dyn/cloudbilling_v1.html +++ b/docs/dyn/cloudbilling_v1.html @@ -79,6 +79,11 @@list_next()
+Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Instance Methods
Returns the billingAccounts Resource.
++
+organizations()
+Returns the organizations Resource.
+ diff --git a/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html new file mode 100644 index 00000000000..0a77281241d --- /dev/null +++ b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html @@ -0,0 +1,197 @@ + + + +Cloud Billing API . organizations . billingAccounts
+Instance Methods
++
+close()
Close httplib2 connections.
++
+create(parent, body=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
++
+list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
++
+list_next()
+Retrieves the next page of results.
++
+move(destinationParent, name, x__xgafv=None)
Changes which parent organization a billing account belongs to.
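As an orientation aid (not part of the generated page), here is a minimal sketch of listing billing accounts under an organization parent with this collection; Application Default Credentials are assumed and the organization ID is a placeholder:

    from googleapiclient.discovery import build

    billing = build("cloudbilling", "v1")

    # List billing accounts whose parent is the (placeholder) organization.
    response = billing.organizations().billingAccounts().list(
        parent="organizations/12345678"
    ).execute()
    for account in response.get("billingAccounts", []):
        print(account["name"])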
+Method Details
+close()
+Close httplib2 connections.
+create(parent, body=None, x__xgafv=None)
+This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts. + +Args: + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required) + body: object, The request body. + The object takes the form of: + +{ # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+++ +list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access). + +Args: + parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required) + filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported. + pageSize: integer, Requested page size. The maximum page size is 100; this is also the default. + pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for `ListBillingAccounts`. + "billingAccounts": [ # A list of billing accounts. + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. + }, + ], + "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. +}+++ +list_next()
+Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++++ + \ No newline at end of file diff --git a/docs/dyn/cloudbilling_v1.organizations.html b/docs/dyn/cloudbilling_v1.organizations.html new file mode 100644 index 00000000000..18fdabc9c7c --- /dev/null +++ b/docs/dyn/cloudbilling_v1.organizations.html @@ -0,0 +1,91 @@ + + + +move(destinationParent, name, x__xgafv=None)
+Changes which parent organization a billing account belongs to. + +Args: + destinationParent: string, Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`. (required) + name: string, Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+Cloud Billing API . organizations
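A minimal sketch of the organization-scoped `move` variant documented above (not part of the generated page); unlike `billingAccounts.move`, both resource names are passed as parameters rather than in a request body. Application Default Credentials are assumed and both IDs are placeholders:

    from googleapiclient.discovery import build

    billing = build("cloudbilling", "v1")

    # Move the (placeholder) billing account under the (placeholder) organization.
    moved = billing.organizations().billingAccounts().move(
        destinationParent="organizations/12345678",
        name="billingAccounts/012345-567890-ABCDEF",
    ).execute()
    print(moved["name"])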
+Instance Methods
+ +Returns the billingAccounts Resource.
+ ++
+close()
Close httplib2 connections.
+Method Details
+++ + \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json index 843e45f4c9d..cc825aeec27 100644 --- a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json @@ -119,7 +119,13 @@ "httpMethod": "POST", "id": "cloudbilling.billingAccounts.create", "parameterOrder": [], - "parameters": {}, + "parameters": { + "parent": { + "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "query", + "type": "string" + } + }, "path": "v1/billingAccounts", "request": { "$ref": "BillingAccount" @@ -214,6 +220,11 @@ "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", "location": "query", "type": "string" + }, + "parent": { + "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "query", + "type": "string" } }, "path": "v1/billingAccounts", @@ -226,6 +237,35 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "move": { + "description": "Changes which parent organization a billing account belongs to.", + "flatPath": "v1/billingAccounts/{billingAccountsId}:move", + "httpMethod": "POST", + "id": "cloudbilling.billingAccounts.move", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account.", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:move", + "request": { + "$ref": "MoveBillingAccountRequest" + }, + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "patch": { "description": "Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.", "flatPath": "v1/billingAccounts/{billingAccountsId}", @@ -363,6 +403,196 @@ ] } } + }, + "subAccounts": { + "methods": { + "create": { + "description": "This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). 
When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.", + "flatPath": "v1/billingAccounts/{billingAccountsId}/subAccounts", + "httpMethod": "POST", + "id": "cloudbilling.billingAccounts.subAccounts.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/subAccounts", + "request": { + "$ref": "BillingAccount" + }, + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).", + "flatPath": "v1/billingAccounts/{billingAccountsId}/subAccounts", + "httpMethod": "GET", + "id": "cloudbilling.billingAccounts.subAccounts.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Requested page size. The maximum page size is 100; this is also the default.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/subAccounts", + "response": { + "$ref": "ListBillingAccountsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "organizations": { + "resources": { + "billingAccounts": { + "methods": { + "create": { + "description": "This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). 
Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.", + "flatPath": "v1/organizations/{organizationsId}/billingAccounts", + "httpMethod": "POST", + "id": "cloudbilling.organizations.billingAccounts.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/billingAccounts", + "request": { + "$ref": "BillingAccount" + }, + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).", + "flatPath": "v1/organizations/{organizationsId}/billingAccounts", + "httpMethod": "GET", + "id": "cloudbilling.organizations.billingAccounts.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Requested page size. The maximum page size is 100; this is also the default.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Optional. The parent resource to list billing accounts from. 
Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/billingAccounts", + "response": { + "$ref": "ListBillingAccountsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "move": { + "description": "Changes which parent organization a billing account belongs to.", + "flatPath": "v1/organizations/{organizationsId}/billingAccounts/{billingAccountsId}:move", + "httpMethod": "GET", + "id": "cloudbilling.organizations.billingAccounts.move", + "parameterOrder": [ + "destinationParent", + "name" + ], + "parameters": { + "destinationParent": { + "description": "Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`.", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + }, + "name": { + "description": "Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account.", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+destinationParent}/{+name}:move", + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } } } }, @@ -521,7 +751,7 @@ } } }, - "revision": "20231020", + "revision": "20231025", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "AggregationInfo": { @@ -826,6 +1056,17 @@ }, "type": "object" }, + "MoveBillingAccountRequest": { + "description": "Request message for `MoveBillingAccount` RPC.", + "id": "MoveBillingAccountRequest", + "properties": { + "destinationParent": { + "description": "Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`.", + "type": "string" + } + }, + "type": "object" + }, "Policy": { "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json index 8183e2a1889..55a985a975e 100644 --- a/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json +++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json @@ -664,7 +664,7 @@ } } }, - "revision": "20231020", + "revision": "20231025", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "CacheFillRegions": { From 204fa5aa6b49170f4c7641fe147e51f361e1b9c1 Mon Sep 17 00:00:00 2001 From: Yoshi Automationclose()
+Close httplib2 connections.+Date: Tue, 31 Oct 2023 14:06:01 +0000 Subject: [PATCH 09/29] feat(compute): update the api #### compute:alpha The following keys were deleted: - schemas.NetworkEndpoint.properties.zone.type (Total Keys: 1) The following keys were added: - resources.instances.methods.deleteNetworkInterface (Total Keys: 23) - schemas.Commitment.properties.existingReservations (Total Keys: 2) - schemas.ResourceStatus.properties.lastInstanceTerminationDetails.$ref (Total Keys: 1) - schemas.ResourceStatusLastInstanceTerminationDetails (Total Keys: 3) #### compute:beta The following keys were deleted: - schemas.InstanceGroupManager.properties.baseInstanceName.annotations.required (Total Keys: 1) #### compute:v1 The following keys were deleted: - schemas.InstanceGroupManager.properties.baseInstanceName.annotations.required (Total Keys: 1) --- docs/dyn/compute_alpha.backendServices.html | 14 +- ...ute_alpha.globalNetworkEndpointGroups.html | 3 - docs/dyn/compute_alpha.instances.html | 145 ++++++++++++++++++ .../compute_alpha.networkEndpointGroups.html | 4 - .../compute_alpha.regionBackendServices.html | 12 +- docs/dyn/compute_alpha.regionCommitments.html | 15 ++ ...ute_alpha.regionNetworkEndpointGroups.html | 3 - .../documents/compute.alpha.json | 120 +++++++++++++-- .../documents/compute.beta.json | 7 +- .../discovery_cache/documents/compute.v1.json | 7 +- 10 files changed, 286 insertions(+), 44 deletions(-) diff --git a/docs/dyn/compute_alpha.backendServices.html b/docs/dyn/compute_alpha.backendServices.html index 893ad98a1d7..88e12120e2f 100644 --- a/docs/dyn/compute_alpha.backendServices.html +++ b/docs/dyn/compute_alpha.backendServices.html @@ -418,7 +418,7 @@ Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1162,7 +1162,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1748,7 +1748,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2316,7 +2316,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2782,7 +2782,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -3264,7 +3264,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -4324,7 +4324,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. diff --git a/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html b/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html index 7fa514bb510..31c566477c9 100644 --- a/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html +++ b/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html @@ -127,7 +127,6 @@Method Details
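The revised ipAddressSelectionPolicy wording above documents a regular request field on backend services. As a minimal sketch only — project, service name, and health-check URL are placeholder values, and the field is honored only for the load-balancing schemes listed in the description — creating a global backend service that prefers IPv6 backends might look like this:

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

# Placeholder identifiers for illustration only.
project = "my-project"
body = {
    "name": "example-backend-service",
    "loadBalancingScheme": "EXTERNAL_MANAGED",
    "protocol": "HTTPS",
    # One of IPV4_ONLY (default), PREFER_IPV6, or IPV6_ONLY, per the field description above.
    "ipAddressSelectionPolicy": "PREFER_IPV6",
    "healthChecks": [
        "projects/my-project/global/healthChecks/example-health-check"
    ],
}

operation = compute.backendServices().insert(project=project, body=body).execute()
print(operation["name"], operation.get("status"))
```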
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -402,7 +401,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -920,7 +918,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/compute_alpha.instances.html b/docs/dyn/compute_alpha.instances.html index 586ad69f55c..a76ab2114c2 100644 --- a/docs/dyn/compute_alpha.instances.html +++ b/docs/dyn/compute_alpha.instances.html @@ -101,6 +101,9 @@Instance Methods
Deletes an access config from an instance's network interface.
++
+deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)
Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.
detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)
Detaches a disk from an instance.
@@ -821,6 +824,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -1956,6 +1962,133 @@Method Details
}
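The hunks above add an output-only lastInstanceTerminationDetails message under the instance resourceStatus. A minimal sketch of reading it back from instances.get() — project, zone, and instance name are placeholders, and both fields may be absent if the VM has never been terminated:

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

instance = (
    compute.instances()
    .get(project="my-project", zone="us-central1-a", instance="example-vm")
    .execute()
)

# Output-only fields; missing keys simply mean no termination has been recorded.
details = instance.get("resourceStatus", {}).get("lastInstanceTerminationDetails", {})
print(details.get("terminationReason", "no termination recorded"))
```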
deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)
+ Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now. + +Args: + project: string, Project ID for this request. (required) + zone: string, The name of the zone for this request. (required) + instance: string, The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer. (required) + networkInterfaceName: string, The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported. (required) + requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources. + "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request. Not present otherwise. + "creationTimestamp": "A String", # [Deprecated] This field is deprecated. + "description": "A String", # [Output Only] A textual description of the operation, which is set when the operation is created. + "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339 text format. + "error": { # [Output Only] If errors are generated during processing of the operation, this field will be populated. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. 
Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {"instanceLimit": "100/request"}, should be returned as, {"instanceLimitPerRequest": "100"}, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. + { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. 
+ }, + ], + }, + "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`. + "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found. + "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is defined by the server. + "insertTime": "A String", # [Output Only] The time that this operation was requested. This value is in RFC3339 text format. + "instancesBulkInsertOperationMetadata": { + "perLocationStatus": { # Status information per location (location name is key). Example key: zones/us-central1-a + "a_key": { + "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far. + "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback. + "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an error. + "status": "A String", # [Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back. + "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created. + }, + }, + }, + "kind": "compute#operation", # [Output Only] Type of the resource. Always `compute#operation` for Operation resources. + "name": "A String", # [Output Only] Name of the operation. + "operationGroupId": "A String", # [Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request. + "operationType": "A String", # [Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on. + "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses. + "region": "A String", # [Output Only] The URL of the region where the operation resides. Only applicable when performing regional operations. + "selfLink": "A String", # [Output Only] Server-defined URL for the resource. + "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "setCommonInstanceMetadataOperationMetadata": { # [Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state. + "clientOperationId": "A String", # [Output Only] The client operation id. + "perLocationOperations": { # [Output Only] Status information per location (location name is key). Example key: zones/us-central1-a + "a_key": { + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # [Output Only] If state is `ABANDONED` or `FAILED`, this field is populated. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. 
There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "state": "A String", # [Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`. + }, + }, + }, + "startTime": "A String", # [Output Only] The time that this operation was started by the server. This value is in RFC3339 text format. + "status": "A String", # [Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`. + "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the operation. + "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation of the target resource. + "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from. + "user": "A String", # [Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`. + "warnings": [ # [Output Only] If warning messages are generated during processing of the operation, this field will be populated. + { + "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response. + "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } + { + "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). + "value": "A String", # [Output Only] A warning data value corresponding to the key. + }, + ], + "message": "A String", # [Output Only] A human-readable description of the warning code. + }, + ], + "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations. +}+
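Taken together, the method documentation above describes a POST that returns a standard zonal Operation. A sketch of calling the new method and blocking on that operation — all identifiers are placeholders, and per the description only VLAN interfaces can currently be deleted:

```python
import uuid

from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

project, zone = "my-project", "us-central1-a"

operation = (
    compute.instances()
    .deleteNetworkInterface(
        project=project,
        zone=zone,
        instance="example-vm",
        networkInterfaceName="vlan-nic0",  # only VLAN interface deletion is supported
        requestId=str(uuid.uuid4()),       # lets the server ignore accidental retries
    )
    .execute()
)

# Wait for the zonal operation to finish, then surface any error details it carries.
result = (
    compute.zoneOperations()
    .wait(project=project, zone=zone, operation=operation["name"])
    .execute()
)
if "error" in result:
    raise RuntimeError(result["error"]["errors"])
```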
detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)
Detaches a disk from an instance. @@ -2373,6 +2506,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -3387,6 +3523,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -3923,6 +4062,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -7913,6 +8055,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. diff --git a/docs/dyn/compute_alpha.networkEndpointGroups.html b/docs/dyn/compute_alpha.networkEndpointGroups.html index 25a04229089..452f84d77cf 100644 --- a/docs/dyn/compute_alpha.networkEndpointGroups.html +++ b/docs/dyn/compute_alpha.networkEndpointGroups.html @@ -262,7 +262,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -539,7 +538,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -1032,7 +1030,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], @@ -1084,7 +1081,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/compute_alpha.regionBackendServices.html b/docs/dyn/compute_alpha.regionBackendServices.html index 04515df5d88..7830bd20995 100644 --- a/docs/dyn/compute_alpha.regionBackendServices.html +++ b/docs/dyn/compute_alpha.regionBackendServices.html @@ -391,7 +391,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -980,7 +980,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1549,7 +1549,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2016,7 +2016,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2499,7 +2499,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -3431,7 +3431,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. diff --git a/docs/dyn/compute_alpha.regionCommitments.html b/docs/dyn/compute_alpha.regionCommitments.html index 0d7a28a0351..c3630aa3808 100644 --- a/docs/dyn/compute_alpha.regionCommitments.html +++ b/docs/dyn/compute_alpha.regionCommitments.html @@ -143,6 +143,9 @@Method Details
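The same ipAddressSelectionPolicy field appears on regional backend services in the hunks above. As an illustrative sketch only (placeholder names; reading the current fingerprint first is an assumption made here to guard against concurrent edits), switching an existing regional service to PREFER_IPV6 with patch might look like:

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha")
project, region, name = "my-project", "us-central1", "example-regional-service"

# Fetch the current resource so the patch can carry its fingerprint.
current = (
    compute.regionBackendServices()
    .get(project=project, region=region, backendService=name)
    .execute()
)

operation = (
    compute.regionBackendServices()
    .patch(
        project=project,
        region=region,
        backendService=name,
        body={
            "fingerprint": current["fingerprint"],
            "ipAddressSelectionPolicy": "PREFER_IPV6",
        },
    )
    .execute()
)
```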
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -607,6 +610,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -764,6 +770,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -1051,6 +1060,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -1270,6 +1282,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. diff --git a/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html b/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html index 0a427d9ef5b..c36081ce1b6 100644 --- a/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html +++ b/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html @@ -128,7 +128,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -405,7 +404,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -927,7 +925,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json index 428a55324df..6a7fd6edd14 100644 --- a/googleapiclient/discovery_cache/documents/compute.alpha.json +++ b/googleapiclient/discovery_cache/documents/compute.alpha.json @@ -11418,6 +11418,59 @@ "https://www.googleapis.com/auth/compute" ] }, + "deleteNetworkInterface": { + "description": "Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/deleteNetworkInterface", + "httpMethod": "POST", + "id": "compute.instances.deleteNetworkInterface", + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterfaceName" + ], + "parameters": { + "instance": { + "description": "The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer.", + "location": "path", + "required": true, + "type": "string" + }, + "networkInterfaceName": { + "description": "The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported.", + "location": "query", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/deleteNetworkInterface", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "detachDisk": { "description": "Detaches a disk from an instance.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/detachDisk", @@ -43489,7 +43542,7 @@ } } }, - "revision": "20231011", + "revision": "20231017", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -46786,7 +46839,7 @@ "type": "string" }, "ipAddressSelectionPolicy": { - "description": "Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). 
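The newly added compute.instances.deleteNetworkInterface method surfaces in the generated client as shown in this rough sketch; per its description only VLAN interface deletion is currently supported, and all names below are placeholders.

from googleapiclient import discovery

compute = discovery.build('compute', 'alpha')  # Application Default Credentials assumed

# networkInterfaceName is a required query parameter identifying the VLAN
# interface to remove from the running instance.
operation = compute.instances().deleteNetworkInterface(
    project='example-project',
    zone='us-central1-a',
    instance='example-instance',
    networkInterfaceName='example-vlan-interface').execute()
print(operation.get('status'))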
", "enum": [ "IPV4_ONLY", "IPV6_ONLY", @@ -48694,6 +48747,13 @@ "description": "[Output Only] Commitment end time in RFC3339 text format.", "type": "string" }, + "existingReservations": { + "description": "Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation ", + "items": { + "type": "string" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -68023,10 +68083,6 @@ "description": "Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used.", "format": "int32", "type": "integer" - }, - "zone": { - "description": "The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group.", - "type": "string" } }, "type": "object" @@ -74156,7 +74212,7 @@ "description": "[Output Only] The Cloud Armor Managed Protection (CAMP) tier for this project. It can be one of the following values: CA_STANDARD, CAMP_PLUS_MONTHLY. If this field is not specified, it is assumed to be CA_STANDARD.", "enum": [ "CAMP_PLUS_ANNUAL", - "CAMP_PLUS_MONTHLY", + "CAMP_PLUS_PAYGO", "CA_STANDARD" ], "enumDescriptions": [ @@ -74313,7 +74369,7 @@ "description": "Managed protection tier to be set.", "enum": [ "CAMP_PLUS_ANNUAL", - "CAMP_PLUS_MONTHLY", + "CAMP_PLUS_PAYGO", "CA_STANDARD" ], "enumDescriptions": [ @@ -79603,6 +79659,10 @@ "description": "Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls.", "id": "ResourceStatus", "properties": { + "lastInstanceTerminationDetails": { + "$ref": "ResourceStatusLastInstanceTerminationDetails", + "description": "[Output Only] Contains last termination details why the instance was terminated." 
+ }, "physicalHost": { "description": "[Output Only] An opaque ID of the host on which the VM is running.", "type": "string" @@ -79620,6 +79680,48 @@ }, "type": "object" }, + "ResourceStatusLastInstanceTerminationDetails": { + "id": "ResourceStatusLastInstanceTerminationDetails", + "properties": { + "terminationReason": { + "description": "Reason for termination", + "enum": [ + "BAD_BILLING_ACCOUNT", + "CLOUD_ABUSE_DETECTED", + "DISK_ERROR", + "FREE_TRIAL_EXPIRED", + "INSTANCE_UPDATE_REQUIRED_RESTART", + "INTERNAL_ERROR", + "KMS_REJECTION", + "MANAGED_INSTANCE_GROUP", + "OS_TERMINATED", + "PREEMPTED", + "SCHEDULED_STOP", + "SHUTDOWN_DUE_TO_MAINTENANCE", + "UNSPECIFIED_TERMINATION_REASON", + "USER_TERMINATED" + ], + "enumDescriptions": [ + "Terminated due to bad billing", + "Terminated by Cloud Abuse team", + "Terminated due to disk errors", + "Terminated due to free trial expired", + "Instance.update initiated which required RESTART", + "Terminated due to internal error", + "Terminated due to Key Management Service (KMS) key failure.", + "Terminated by managed instance group", + "Terminated from the OS level", + "Terminated due to preemption", + "Terminated due to scheduled stop", + "Terminated due to maintenance", + "The termination reason is not specified", + "Terminated by user" + ], + "type": "string" + } + }, + "type": "object" + }, "ResourceStatusScheduling": { "id": "ResourceStatusScheduling", "properties": { @@ -83415,7 +83517,7 @@ "description": "[Output Only] The minimum managed protection tier required for this rule. [Deprecated] Use requiredManagedProtectionTiers instead.", "enum": [ "CAMP_PLUS_ANNUAL", - "CAMP_PLUS_MONTHLY", + "CAMP_PLUS_PAYGO", "CA_STANDARD" ], "enumDescriptions": [ diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index 61664f1f295..67280f84748 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -40215,7 +40215,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -53800,11 +53800,6 @@ "type": "array" }, "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", "pattern": "[a-z][-a-z0-9]{0,57}", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/compute.v1.json b/googleapiclient/discovery_cache/documents/compute.v1.json index 42e1a81811e..8377d3edeb1 100644 --- a/googleapiclient/discovery_cache/documents/compute.v1.json +++ b/googleapiclient/discovery_cache/documents/compute.v1.json @@ -35267,7 +35267,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -47650,11 +47650,6 @@ "type": "array" }, "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. 
The base instance name must comply with RFC1035.", "pattern": "[a-z][-a-z0-9]{0,57}", "type": "string" From e6171f5380b7a0eb1fbf3a7b910f2c9fef0112b7 Mon Sep 17 00:00:00 2001 From: Yoshi AutomationDate: Tue, 31 Oct 2023 14:06:02 +0000 Subject: [PATCH 10/29] feat(connectors): update the api #### connectors:v1 The following keys were added: - schemas.EventSubscription.properties.jms.$ref (Total Keys: 1) - schemas.EventingConfigTemplate.properties.eventListenerType.type (Total Keys: 1) - schemas.JMS (Total Keys: 4) --- ...ations.connections.eventSubscriptions.html | 16 +++++ ...cations.providers.connectors.versions.html | 2 + .../documents/connectors.v1.json | 59 ++++++++++++++++++- .../documents/connectors.v2.json | 2 +- 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html b/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html index 59c9f2036ce..4f29dff02b0 100644 --- a/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html +++ b/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html @@ -129,6 +129,10 @@ Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -234,6 +238,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -281,6 +289,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -337,6 +349,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. diff --git a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html index 5caac806cf1..b1bc66442f4 100644 --- a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html +++ b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html @@ -443,6 +443,7 @@Method Details
"valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. }, "enrichmentSupported": True or False, # Enrichment Supported. + "eventListenerType": "A String", # The type of the event listener for a specific connector. "isEventingSupported": True or False, # Is Eventing Supported. "registrationDestinationConfig": { # DestinationConfigTemplate defines required destinations supported by the Connector. # Registration host destination config template. "defaultPort": 42, # The default port. @@ -907,6 +908,7 @@Method Details
"valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. }, "enrichmentSupported": True or False, # Enrichment Supported. + "eventListenerType": "A String", # The type of the event listener for a specific connector. "isEventingSupported": True or False, # Is Eventing Supported. "registrationDestinationConfig": { # DestinationConfigTemplate defines required destinations supported by the Connector. # Registration host destination config template. "defaultPort": 42, # The default port. diff --git a/googleapiclient/discovery_cache/documents/connectors.v1.json b/googleapiclient/discovery_cache/documents/connectors.v1.json index 0de7446561a..4cc54ce292a 100644 --- a/googleapiclient/discovery_cache/documents/connectors.v1.json +++ b/googleapiclient/discovery_cache/documents/connectors.v1.json @@ -1832,7 +1832,7 @@ } } }, - "revision": "20231015", + "revision": "20231024", "rootUrl": "https://connectors.googleapis.com/", "schemas": { "AuditConfig": { @@ -2371,12 +2371,22 @@ "enum": [ "STATE_UNSPECIFIED", "REFRESHING", - "UPDATED" + "UPDATED", + "REFRESHING_SCHEMA_METADATA", + "UPDATED_SCHEMA_METADATA", + "REFRESH_SCHEMA_METADATA_FAILED", + "REFRESHING_FULL_SCHEMA", + "UPDATED_FULL_SCHEMA" ], "enumDescriptions": [ "Default state.", "Schema refresh is in progress.", - "Schema has been updated." + "Schema has been updated.", + "Schema refresh for metadata is in progress.", + "Schema metadata has been updated.", + "Failed to refresh schema metadata", + "Triggered full schema refresh", + "Updated full schema" ], "readOnly": true, "type": "string" @@ -2978,6 +2988,10 @@ "description": "Optional. Event type id of the event of current EventSubscription.", "type": "string" }, + "jms": { + "$ref": "JMS", + "description": "Optional. JMS is the source for the event listener." + }, "name": { "description": "Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription}", "type": "string" @@ -3184,6 +3198,20 @@ "description": "Enrichment Supported.", "type": "boolean" }, + "eventListenerType": { + "description": "The type of the event listener for a specific connector.", + "enum": [ + "EVENT_LISTENER_TYPE_UNSPECIFIED", + "WEBHOOK_LISTENER", + "JMS_LISTENER" + ], + "enumDescriptions": [ + "Default value.", + "Webhook listener. e.g. Jira, Zendesk, Servicenow etc.,", + "JMS Listener. e.g. IBM MQ, Rabbit MQ etc.," + ], + "type": "string" + }, "isEventingSupported": { "description": "Is Eventing Supported.", "type": "boolean" @@ -3789,6 +3817,31 @@ }, "type": "object" }, + "JMS": { + "description": "JMS message denotes the source of the event", + "id": "JMS", + "properties": { + "name": { + "description": "Optional. Name of the JMS source. i.e. queueName or topicName", + "type": "string" + }, + "type": { + "description": "Optional. Type of the JMS Source. i.e. Queue or Topic", + "enum": [ + "TYPE_UNSPECIFIED", + "QUEUE", + "TOPIC" + ], + "enumDescriptions": [ + "Default state.", + "JMS Queue.", + "JMS Topic." 
+ ], + "type": "string" + } + }, + "type": "object" + }, "JsonSchema": { "description": "JsonSchema representation of schema metadata", "id": "JsonSchema", diff --git a/googleapiclient/discovery_cache/documents/connectors.v2.json b/googleapiclient/discovery_cache/documents/connectors.v2.json index 17b3e27c5ad..1cf0ec6c5a6 100644 --- a/googleapiclient/discovery_cache/documents/connectors.v2.json +++ b/googleapiclient/discovery_cache/documents/connectors.v2.json @@ -558,7 +558,7 @@ } } }, - "revision": "20231015", + "revision": "20231024", "rootUrl": "https://connectors.googleapis.com/", "schemas": { "Action": { From 84de9162568c3c9ac9a1589534a107512d88e14b Mon Sep 17 00:00:00 2001 From: Yoshi AutomationDate: Tue, 31 Oct 2023 14:06:02 +0000 Subject: [PATCH 11/29] feat(contentwarehouse): update the api #### contentwarehouse:v1 The following keys were deleted: - schemas.AssistantApiCoreTypesCalendarEvent.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesCalendarEventWrapper.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesDeviceConfig.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesDeviceId.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesDeviceUserIdentity.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesImage.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesSurfaceIdentity.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesSurfaceType.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesSurfaceVersion.deprecated (Total Keys: 1) - schemas.AssistantApiDate.deprecated (Total Keys: 1) - schemas.AssistantApiDateTime.deprecated (Total Keys: 1) - schemas.AssistantApiRecurrence.deprecated (Total Keys: 1) - schemas.AssistantApiRecurrenceDatetimeRange (Total Keys: 4) - schemas.AssistantApiTimeOfDay.deprecated (Total Keys: 1) - schemas.AssistantApiTimeZone.deprecated (Total Keys: 1) - schemas.AssistantApiTimestamp.deprecated (Total Keys: 1) - schemas.HumanSensingFaceAttribute (Total Keys: 8) - schemas.ImageData.properties.faceDetection.$ref (Total Keys: 1) - schemas.PhotosVisionServiceFaceFaceParams (Total Keys: 139) - schemas.PhotosVisionServiceFaceImageParams (Total Keys: 6) - schemas.PhotosVisionServiceFaceVersionedFaceSignature (Total Keys: 11) - schemas.QualityShoppingShoppingAttachmentProduct.properties.productPopularity (Total Keys: 2) - schemas.ReneFaceResponse (Total Keys: 4) - schemas.RepositoryWebrefMention.properties.timeOffsetConfidence (Total Keys: 2) - schemas.RepositoryWebrefMention.properties.timeOffsetMs (Total Keys: 2) - schemas.RepositoryWebrefSimplifiedCompositeDoc.properties.webrefOutlinksLegacy (Total Keys: 2) - schemas.RepositoryWebrefWebrefDocumentInfo.properties.outlinkInfos.$ref (Total Keys: 1) - schemas.ResearchVisionFace2cartoonAgeClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonChinLengthClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyeColorClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyeShapeClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyeSlantClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyebrowShapeClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyebrowThicknessClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonEyebrowWidthClassifierResults (Total Keys: 5) - 
schemas.ResearchVisionFace2cartoonFace2CartoonResults (Total Keys: 50) - schemas.ResearchVisionFace2cartoonFaceWidthClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonFacialHairClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonGenderClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonGlassesClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonHairColorClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonHairStyleClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonInterEyeDistanceClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonJawShapeClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonLipThicknessClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonMouthWidthClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonNoseWidthClassifierResults (Total Keys: 5) - schemas.ResearchVisionFace2cartoonSkinToneClassifierResults (Total Keys: 5) The following keys were added: - schemas.AssistantApiCoreTypesGovernedColor.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesGovernedDeviceConfig.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesGovernedDeviceId.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesGovernedRingtoneTaskMetadata.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesGovernedSurfaceIdentity.deprecated (Total Keys: 1) - schemas.AssistantApiCoreTypesGovernedSurfaceVersion.deprecated (Total Keys: 1) - schemas.AssistantApiDateTimeRange (Total Keys: 4) - schemas.AssistantGroundingRankerMediaGroundingProviderFeatures.properties.isSelfReportedSvodProvider.type (Total Keys: 1) - schemas.QualityNavboostCrapsCrapsData.properties.voterTokenCount (Total Keys: 2) - schemas.QualityNavboostCrapsFeatureCrapsData.properties.voterTokenBitmap.$ref (Total Keys: 1) - schemas.QualityNavboostGlueVoterTokenBitmapMessage (Total Keys: 7) - schemas.QualityNsrNsrData.properties.smallPersonalSite (Total Keys: 2) - schemas.TrawlerFetchReplyData.properties.webioInfo.$ref (Total Keys: 1) - schemas.TrawlerFetchReplyDataWebIOInfo (Total Keys: 5) The following keys were changed: - schemas.AssistantApiRecurrence.properties.blacklistedRanges.items.$ref (Total Keys: 1) --- .../documents/contentwarehouse.v1.json | 2696 ++++------------- 1 file changed, 532 insertions(+), 2164 deletions(-) diff --git a/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json b/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json index 433c36bddb0..2c259249e73 100644 --- a/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json +++ b/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json @@ -1156,7 +1156,7 @@ } } }, - "revision": "20231011", + "revision": "20231020", "rootUrl": "https://contentwarehouse.googleapis.com/", "schemas": { "AbuseiamAbuseType": { @@ -3733,7 +3733,8 @@ "NOTEBOOKLM_AFFINITY", "PLAYSPACE_LABS_AFFINITY", "ZOMBIE_CLOUD_AFFINITY", - "RELATIONSHIPS_AFFINITY" + "RELATIONSHIPS_AFFINITY", + "APPS_WORKFLOW_AFFINITY" ], "enumDeprecated": [ false, @@ -4007,6 +4008,7 @@ false, false, false, + false, false ], "enumDescriptions": [ @@ -4281,6 +4283,7 @@ "", "", "", + "", "" ], "type": "string" @@ -11388,8 +11391,7 @@ "type": "object" }, "AssistantApiCoreTypesCalendarEvent": { - "deprecated": true, - 
"description": "This proto contains the information of a calendar event, including title, start time, end time, etc. IMPORTANT: The definition of CalendarEvent proto is being moved to //assistant/api/core_types/governed/calendar_event_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. LINT.IfChange(CalendarEvent) NEXT_ID: 26", + "description": "This proto contains the information of a calendar event, including title, start time, end time, etc. LINT.IfChange(CalendarEvent) NEXT_ID: 26", "id": "AssistantApiCoreTypesCalendarEvent", "properties": { "attendees": { @@ -11723,8 +11725,7 @@ "type": "object" }, "AssistantApiCoreTypesCalendarEventWrapper": { - "deprecated": true, - "description": "This empty type allows us to publish sensitive calendar events to go/attentional-entities, while maintaining BUILD visibility protection for their contents. The BUILD-visibility-protected extension to this message is defined at http://google3/assistant/verticals/calendar/proto/multi_account_calendar_event.proto IMPORTANT: The definition of CalendarEventWrapper proto is being moved to //assistant/api/core_types/governed/calendar_event_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "This empty type allows us to publish sensitive calendar events to go/attentional-entities, while maintaining BUILD visibility protection for their contents. The BUILD-visibility-protected extension to this message is defined at http://google3/assistant/verticals/calendar/proto/multi_account_calendar_event.proto", "id": "AssistantApiCoreTypesCalendarEventWrapper", "properties": {}, "type": "object" @@ -11826,7 +11827,6 @@ "type": "object" }, "AssistantApiCoreTypesDeviceConfig": { - "deprecated": true, "description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. Next Id: 5", "id": "AssistantApiCoreTypesDeviceConfig", "properties": { @@ -11842,7 +11842,6 @@ "type": "object" }, "AssistantApiCoreTypesDeviceId": { - "deprecated": true, "description": "LINT.IfChange(DeviceId) Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. * C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. Next ID: 14", "id": "AssistantApiCoreTypesDeviceId", "properties": { @@ -11903,8 +11902,7 @@ "type": "object" }, "AssistantApiCoreTypesDeviceUserIdentity": { - "deprecated": true, - "description": "IMPORTANT: The definition of DeviceUserIdentity is being moved to //assistant/api/core_types/governed/device_user_identity.proto. All existing references will be updated to point to the new location. 
If you are adding a reference, use the new DeviceUserIdentity instead of this one. // LINT.IfChange", + "description": "LINT.IfChange", "id": "AssistantApiCoreTypesDeviceUserIdentity", "properties": { "deviceId": { @@ -11920,7 +11918,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedColor": { - "description": "Represents a color in the RGBA color space. This message mirrors google.type.Color.", + "deprecated": true, + "description": "LINT.IfChange Represents a color in the RGBA color space. This message mirrors google.type.Color. IMPORTANT: The definition of Color proto is being moved to //assistant/api/core_types/color_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", "id": "AssistantApiCoreTypesGovernedColor", "properties": { "alpha": { @@ -11947,7 +11946,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedDeviceConfig": { - "description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. Next Id: 5", + "deprecated": true, + "description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. IMPORTANT: The definition of DeviceConfig proto is being moved to //assistant/api/core_types/device_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. Next Id: 5", "id": "AssistantApiCoreTypesGovernedDeviceConfig", "properties": { "agentId": { @@ -11962,7 +11962,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedDeviceId": { - "description": "LINT.IfChange Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. * C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. Next ID: 14", + "deprecated": true, + "description": "LINT.IfChange Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. 
* C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. IMPORTANT: The definition of DeviceId proto is being moved to //assistant/api/core_types/device_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. Next ID: 14", "id": "AssistantApiCoreTypesGovernedDeviceId", "properties": { "agsaClientInstanceId": { @@ -12022,7 +12023,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedRingtoneTaskMetadata": { - "description": "Task metadata information describing the ringtone. Next id: 11", + "deprecated": true, + "description": "LINT.IfChange Task metadata information describing the ringtone. IMPORTANT: The definition of RingtoneTaskMetadata proto is being moved to //assistant/api/core_types/ringtone_task_metadata.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. Next id: 11", "id": "AssistantApiCoreTypesGovernedRingtoneTaskMetadata", "properties": { "category": { @@ -12340,7 +12342,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedSurfaceIdentity": { - "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. NEXT ID: 6 LINT.IfChange", + "deprecated": true, + "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. IMPORTANT: The partial migration to the SurfaceIdentity and SurfaceVersion protos defined here is being rolled back (b/303012824). All existing references will be updated to point back to //assistant/api/core_types/surface_identity.proto. If you are adding a reference, use the SurfaceIdentity and SurfaceVersion protos defined there. NEXT ID: 6 LINT.IfChange", "id": "AssistantApiCoreTypesGovernedSurfaceIdentity", "properties": { "deviceId": { @@ -12667,6 +12670,7 @@ "type": "object" }, "AssistantApiCoreTypesGovernedSurfaceVersion": { + "deprecated": true, "description": "The version of the surface/client. New surfaces are encouraged to only use the \u201cmajor\u201d field to keep track of version number. The \u201cminor\u201d field may be used for surfaces that rely on both the \u201cmajor\u201d and \u201cminor\u201d fields to define their version.", "id": "AssistantApiCoreTypesGovernedSurfaceVersion", "properties": { @@ -12697,8 +12701,7 @@ "type": "object" }, "AssistantApiCoreTypesImage": { - "deprecated": true, - "description": "An image represents the data about an image or a photo. IMPORTANT: The definition of the Image message is being moved to //assistant/api/core_types/governed/image_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new Image message instead of this one. 
LINT.IfChange NextId: 13", + "description": "An image represents the data about an image or a photo. LINT.IfChange NextId: 13", "id": "AssistantApiCoreTypesImage", "properties": { "accessibilityText": { @@ -13124,8 +13127,7 @@ "type": "object" }, "AssistantApiCoreTypesSurfaceIdentity": { - "deprecated": true, - "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. NEXT ID: 6 IMPORTANT: The definitions of the SurfaceIdentity and SurfaceVersion protos are being moved to //assistant/api/core_types/governed/surface_identity.proto All existing references will be updated to point to the new location. If you are adding a reference, use the new SurfaceIdentity and SurfaceVersion protos instead of the protos defined here. LINT.IfChange", + "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. NEXT ID: 6 LINT.IfChange", "id": "AssistantApiCoreTypesSurfaceIdentity", "properties": { "deviceId": { @@ -13446,7 +13448,7 @@ }, "surfaceTypeString": { "deprecated": true, - "description": "DEPRECATED. assistant.api.core_types.governed.SurfaceIdentity.surface_type field should be used instead. The device's surface type. This is the string version of the assistant.api.core_types.SurfaceType enum. The server should not use this field, rather it should use the SurfaceType value derived from this string.", + "description": "DEPRECATED. The legacy device's surface type string. NOTE: Prefer using the ontological `surface_type` field. The device's surface type. This is the string version of the assistant.api.core_types.SurfaceType enum. The server should not use this field, rather it should use the SurfaceType value derived from this string.", "type": "string" }, "surfaceVersion": { @@ -13457,7 +13459,6 @@ "type": "object" }, "AssistantApiCoreTypesSurfaceType": { - "deprecated": true, "description": "Specifies the types of device surfaces. LINT.IfChange When adding new surface types make sure that My Activity (https://myactivity.google.com/product/assistant) will correctly render by adding your enum to http://cs/symbol:GetAssistSurfaceName%20f:%5C.cc$ If your type doesn't fit in to any of the existing surfaces messages, add a new message in http://google3/personalization/footprints/boq/uservisible/events/intl/smh_frontend_messages.h.", "id": "AssistantApiCoreTypesSurfaceType", "properties": { @@ -13651,7 +13652,6 @@ "type": "object" }, "AssistantApiCoreTypesSurfaceVersion": { - "deprecated": true, "description": "The version of the surface/client. New surfaces are encouraged to only use the \u201cmajor\u201d field to keep track of version number. The \u201cminor\u201d field may be used for surfaces that rely on both the \u201cmajor\u201d and \u201cminor\u201d fields to define their version.", "id": "AssistantApiCoreTypesSurfaceVersion", "properties": { @@ -13723,8 +13723,7 @@ "type": "object" }, "AssistantApiDate": { - "deprecated": true, - "description": "A Gregorian calendar date. 
IMPORTANT: The definition of Date proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A Gregorian calendar date.", "id": "AssistantApiDate", "properties": { "day": { @@ -13746,8 +13745,7 @@ "type": "object" }, "AssistantApiDateTime": { - "deprecated": true, - "description": "A date-time specification, combining a date and civil time (relative to a given timezone). IMPORTANT: The definition of DateTime proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A date-time specification, combining a date and civil time (relative to a given timezone).", "id": "AssistantApiDateTime", "properties": { "date": { @@ -13765,6 +13763,21 @@ }, "type": "object" }, + "AssistantApiDateTimeRange": { + "description": "A representation of a range of time with start and end datetime specified.", + "id": "AssistantApiDateTimeRange", + "properties": { + "endDate": { + "$ref": "AssistantApiDateTime", + "description": "End date of the range." + }, + "startDate": { + "$ref": "AssistantApiDateTime", + "description": "Start date of the range." + } + }, + "type": "object" + }, "AssistantApiDeviceCapabilities": { "description": "This message describes roughly what a surface is capable of doing and metadata around those capabilities. These capabilities are determined based on: - device hardware - software - status (e.g. volume level, battery percentage) These capabilities refer to the surface and not the physical device. The list of supported surfaces can be found in the assistant.api.core_types.SurfaceType enum. A surface's capabilities can differ from the device's. An example would be ANDROID_ALLO running on Pixel. Allo does not support AudioInput while the Pixel does. In this case, audio_input will be set to false for Assistant Allo requests while it might be set to true for OPA_NEXUS requests. Next ID: 36", "id": "AssistantApiDeviceCapabilities", @@ -15137,8 +15150,7 @@ "type": "object" }, "AssistantApiRecurrence": { - "deprecated": true, - "description": "Date-based recurrences specify repeating events. Conceptually, a recurrence is a (possibly unbounded) sequence of dates on which an event falls, described by a list of constraints. A date is in a recurrence if and only if it satisfies all of the constraints. Note that devices may support some constraints, but not all. IMPORTANT: The definition of Recurrence proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "Date-based recurrences specify repeating events. Conceptually, a recurrence is a (possibly unbounded) sequence of dates on which an event falls, described by a list of constraints. A date is in a recurrence if and only if it satisfies all of the constraints. 
Note that devices may support some constraints, but not all.", "id": "AssistantApiRecurrence", "properties": { "begin": { @@ -15148,7 +15160,7 @@ "blacklistedRanges": { "description": "A list of blacklisted dates to skip the alarm on.", "items": { - "$ref": "AssistantApiRecurrenceDatetimeRange" + "$ref": "AssistantApiDateTimeRange" }, "type": "array" }, @@ -15201,21 +15213,6 @@ }, "type": "object" }, - "AssistantApiRecurrenceDatetimeRange": { - "description": "A representation of a range of time with start and end datetime specified.", - "id": "AssistantApiRecurrenceDatetimeRange", - "properties": { - "endDate": { - "$ref": "AssistantApiDateTime", - "description": "End date of the range." - }, - "startDate": { - "$ref": "AssistantApiDateTime", - "description": "Start date of the range." - } - }, - "type": "object" - }, "AssistantApiScreenCapabilities": { "description": "These capabilities represent the tactile features associated with the device. This includes, for example, whether the device has a screen, how big the screen is, and privacy of the screen. Next ID: 11", "id": "AssistantApiScreenCapabilities", @@ -18130,8 +18127,7 @@ "type": "object" }, "AssistantApiTimeOfDay": { - "deprecated": true, - "description": "A civil time relative to a timezone. IMPORTANT: The definition of TimeOfDay proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A civil time relative to a timezone.", "id": "AssistantApiTimeOfDay", "properties": { "hour": { @@ -18158,8 +18154,7 @@ "type": "object" }, "AssistantApiTimeZone": { - "deprecated": true, - "description": "A time zone. Conceptually, a time zone is a set of rules associated with a location that describes a UTC offset and how it changes over time (e.g. Daylight Saving Time). The offset is used to compute the local date and time. IMPORTANT: The definition of TimeZone enum is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A time zone. Conceptually, a time zone is a set of rules associated with a location that describes a UTC offset and how it changes over time (e.g. Daylight Saving Time). The offset is used to compute the local date and time.", "id": "AssistantApiTimeZone", "properties": { "ianaId": { @@ -18170,8 +18165,7 @@ "type": "object" }, "AssistantApiTimestamp": { - "deprecated": true, - "description": "An absolute point in time independent of timezone or calendar, based on the proto3 Timestamp (//google/protobuf/timestamp.proto). IMPORTANT: The definition of Timestamp proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. NOTE: THIS IS NO LONGER RECOMMENDED TO BE USED. It was originally defined separately from google.protobuf.Timestamp due to incompatibility with proto2 syntax. The incompatibility issues have since been resolved and so the Google-wide standard representation of google.protobuf.Timestamp should be preferred. In fact, google.protobuf.* protos in general are now recommended to be used in new APIs.", + "description": "An absolute point in time independent of timezone or calendar, based on the proto3 Timestamp (//google/protobuf/timestamp.proto). 
NOTE: THIS IS NO LONGER RECOMMENDED TO BE USED. It was originally defined separately from google.protobuf.Timestamp due to incompatibility with proto2 syntax. The incompatibility issues have since been resolved and so the Google-wide standard representation of google.protobuf.Timestamp should be preferred. In fact, google.protobuf.* protos in general are now recommended to be used in new APIs.", "id": "AssistantApiTimestamp", "properties": { "nanos": { @@ -18239,7 +18233,7 @@ "type": "object" }, "AssistantContextAppProviderId": { - "description": "LINT.IfChanged Identifier for an application provider. NOTE: AppProviderId contains surface-specific info, such as the Android package name of the application. This was necessary for supporting current use cases that rely on surface-specific info in feature code. Eventually we want to deprecate AppProviderId and fetch surface-specific info in some other way (e.g. in a surface-translation layer). But until then, we may continue extending AppProviderId with other surface-specific info.", + "description": "LINT.IfChange Identifier for an application provider. NOTE: AppProviderId contains surface-specific info, such as the Android package name of the application. This was necessary for supporting current use cases that rely on surface-specific info in feature code. Eventually we want to deprecate AppProviderId and fetch surface-specific info in some other way (e.g. in a surface-translation layer). But until then, we may continue extending AppProviderId with other surface-specific info.", "id": "AssistantContextAppProviderId", "properties": { "activityClassName": { @@ -19485,7 +19479,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -19494,7 +19489,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" } @@ -19926,7 +19922,7 @@ "type": "object" }, "AssistantGroundingRankerMediaGroundingProviderFeatures": { - "description": "Features to be passed from Media GP to HGR. Next ID: 20", + "description": "Features to be passed from Media GP to HGR. 
Next ID: 21", "id": "AssistantGroundingRankerMediaGroundingProviderFeatures", "properties": { "albumReleaseType": { @@ -19996,6 +19992,10 @@ "description": "True if the user requests seed radio.", "type": "boolean" }, + "isSelfReportedSvodProvider": { + "description": "Provider is a self(user) reported subscripted provider https://g3doc.corp.google.com/knowledge/g3doc/ump/development/GetProviderAffinity.md?cl=head", + "type": "boolean" + }, "isYoutubeMusicSeeking": { "description": "Indicates whether this is youtube content seeking music.", "type": "boolean" @@ -20632,7 +20632,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -20641,7 +20642,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" }, @@ -32762,7 +32764,7 @@ "type": "object" }, "GeostoreCityJsonProto": { - "description": "CityJsonProto is a custom proto representation of the portion of the CityJSON spec (https://www.cityjson.org/) relevant to internal projects. See go/cityjsonproto-design for more information about the modeling and design decisions implemented here.", + "description": "CityJsonProto is a custom proto representation of the portion of the CityJSON spec (https://www.cityjson.org/) relevant to internal projects. See go/cityjsonproto-design for more information about the modeling and design decisions implemented here. LINT.IfChange", "id": "GeostoreCityJsonProto", "properties": { "cityObjects": { @@ -32820,7 +32822,7 @@ "type": "object" }, "GeostoreCityJsonProtoCityObjectGeometry": { - "description": "Representation of geometry. Geometries vary both in type and in level-of-detail, enabling representation of any shape at any level of granularity.", + "description": "Representation of geometry including geometric primitives which are used as building blocks to construct geometries of varying complexity. Geometries vary both in type and in level-of-detail, enabling representation of any shape at any level of granularity. All geometries are ultimately composed of `MultiPoint`s, which reference the actual vertices. Only linear and planar shapes are allowed, no curves or parametric surfaces.", "id": "GeostoreCityJsonProtoCityObjectGeometry", "properties": { "lod": { @@ -33408,6 +33410,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -34131,6 +34134,7 @@ false, false, false, + false, true, false, false, @@ -34672,7 +34676,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. 
The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -34850,6 +34854,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -39717,6 +39722,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -40440,6 +40446,7 @@ false, false, false, + false, true, false, false, @@ -40981,7 +40988,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -41159,6 +41166,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -42653,6 +42661,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -43376,6 +43385,7 @@ false, false, false, + false, true, false, false, @@ -43917,7 +43927,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -44095,6 +44105,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -45892,6 +45903,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -46615,6 +46627,7 @@ false, false, false, + false, true, false, false, @@ -47156,7 +47169,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -47334,6 +47347,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -60877,168 +60891,6 @@ }, "type": "object" }, - "HumanSensingFaceAttribute": { - "description": "Defines a generic attribute. The name field is the name of the attribute (for example beard, glasses, joy). The confidence defines how reliable the given annotation is. For binary attributes it is bounded between 0 and 1 and can be interpreted as the posterior probability. The value field can be used for continuous attributes like age. Information returned or stored in this message may be sensitive from a privacy, policy, or legal point of view. 
Clients should consult with their p-counsels and the privacy working group (go/pwg) to make sure their use respects Google policies.", - "id": "HumanSensingFaceAttribute", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "name": { - "type": "string" - }, - "type": { - "enum": [ - "TYPE_UNKNOWN", - "FREE_FORM", - "FEMALE", - "MALE", - "AGE", - "NON_HUMAN", - "GLASSES", - "DARK_GLASSES", - "HEADWEAR", - "EYES_VISIBLE", - "LEFT_EYELID_CLOSED", - "RIGHT_EYELID_CLOSED", - "MOUTH_OPEN", - "FACIAL_HAIR", - "LONG_HAIR", - "FRONTAL_GAZE", - "SMILING", - "UNDER_EXPOSED", - "BLURRED", - "LEFT_EYE_VISIBLE", - "RIGHT_EYE_VISIBLE", - "LEFT_EAR_VISIBLE", - "RIGHT_EAR_VISIBLE", - "NOSE_TIP_VISIBLE", - "MOUTH_CENTER_VISIBLE", - "LOWER_FACE_COVERED", - "AMUSEMENT", - "ANGER", - "CONCENTRATION", - "CONFUSION", - "CONTENTMENT", - "DESIRE", - "DISAPPOINTMENT", - "DISGUST", - "ELATION", - "EMBARRASSMENT", - "INTEREST", - "LOVE", - "PAIN", - "PRIDE", - "RELIEF", - "SADNESS", - "SURPRISE", - "CANDID", - "POSED" - ], - "enumDeprecated": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false - ], - "enumDescriptions": [ - "", - "", - "Attribute types that describe the gender of a face. For an attribute if type FEMALE the confidence represent the probability of a face to be from a female person. Similarly, for an attribute of type MALE the confidence is the probability of a face to be from a male person. 4 is reserved for OTHER_GENDER.", - "", - "Attribute type that represent the age of the face. For an attribute of this type the field value represent the age. Values are assumed to be in the range [0, 95].", - "This attributes is used to distinguish actual human faces from other possible face detections like face of sculptures, cartoons faces, and some false detections.", - "Attributes types that describes face appearances/configurations (mouth open, eyes visibles and looking into the camera, smiling) and props (glasses, dark glasses, and headwear).", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Attributes for the visibility of face landmarks. The landmarks refers to a single point in the image, so the eyes are visible if their center is visible, the ears are visible if the ear tragion is visible.", - "", - "", - "", - "", - "", - "An attribute describing if the lower part of a face is covered by something like a face mask, a scarf or any other type of covering. The expectation is for both the mouth and the nose tip to be covered. This is useful for labeling faces in images captured during the Covid pandemic.", - "FeelNet expressions.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "value": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, "I18nPhonenumbersPhoneNumber": { "description": "The PhoneNumber object that is used by all LibPhoneNumber API's to fully represent a phone number.", "id": "I18nPhonenumbersPhoneNumber", @@ -61542,7 +61394,7 @@ "type": "object" }, "ImageData": { - "description": "This defines the per-doc data which is extracted from thumbnails and propagated over to indexing. 
It contains all information that can be used for restricts. Next tag id: 132", + "description": "This defines the per-doc data which is extracted from thumbnails and propagated over to indexing. It contains all information that can be used for restricts. Next tag id: 131", "id": "ImageData", "properties": { "adaboostImageFeaturePorn": { @@ -61666,10 +61518,6 @@ "$ref": "PhotosImageMetadata", "description": "The EXIF generated by photos backend team's (more specifically FIFE's) thumbnailer library. This exif model is more comprehensive since a dedicated team is constantly improving it and adding new fields over time. This is currently populated by moonshine for selected corpora." }, - "faceDetection": { - "$ref": "ReneFaceResponse", - "description": "Face Detection." - }, "featuredImageProp": { "$ref": "ImageMonetizationFeaturedImageProperties", "description": "Properties used in featured imagesearch project. inspiration_score indicates how well an image is related to products, or how inspirational it is." @@ -63385,7 +63233,8 @@ "GENUS_SEARCH_SPORTS", "GENUS_BUSINESSMESSAGING", "GENUS_AERIAL_VIEW", - "GENUS_DOCS_FLIX_RENDER" + "GENUS_DOCS_FLIX_RENDER", + "GENUS_SHOPPING" ], "enumDescriptions": [ "", @@ -63436,7 +63285,8 @@ "Genus for Search Sports vertical videos", "Genus for Business Messaging videos", "Genus for Geo Aerial View", - "Genus for Flix Render (Docs)" + "Genus for Flix Render (Docs)", + "Genus for CDS videos processed through Amarna." ], "type": "string" }, @@ -64336,7 +64186,8 @@ "GENUS_SEARCH_SPORTS", "GENUS_BUSINESSMESSAGING", "GENUS_AERIAL_VIEW", - "GENUS_DOCS_FLIX_RENDER" + "GENUS_DOCS_FLIX_RENDER", + "GENUS_SHOPPING" ], "enumDescriptions": [ "", @@ -64387,7 +64238,8 @@ "Genus for Search Sports vertical videos", "Genus for Business Messaging videos", "Genus for Geo Aerial View", - "Genus for Flix Render (Docs)" + "Genus for Flix Render (Docs)", + "Genus for CDS videos processed through Amarna." 
], "type": "string" }, @@ -91252,6 +91104,7 @@ "NOTEBOOKLM", "ZOMBIE_CLOUD", "RELATIONSHIPS", + "APPS_WORKFLOW", "DEPRECATED_QUICKSTART_FLUME", "DUO_CLIENT", "ALBERT", @@ -91908,97 +91761,98 @@ false, false, false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, false, false, false, @@ -92564,6 +92418,7 @@ "Notebooklm Team contact: labs-tailwind-eng-team@google.com", "Zombie Cloud Team contact: zombie-cloud-eng@google.com", "Relationships Team contact: hana-dev@google.com", + "Apps Workflow Team contact: workflows-frontend-eng@google.com", "", "Duo Client Team contact: duo-eng@google.com", "Project albert (go/albert-frontend) Team contact: albert-eng@google.com", @@ -96430,578 +96285,6 @@ }, "type": "object" }, - "PhotosVisionServiceFaceFaceParams": { - "description": "FaceParams are a collection of parameters of a single face found in an image. WARNING: This message has a jspb target. If you add a new message field inside, either put its definition inside this message as well or add the js file corresponding to the new message to the js_deps and proto_js rules in the BUILD file; otherwise it will break lots of builds. The js file name is the message name all in lowercase letters. Next available id: 40.", - "id": "PhotosVisionServiceFaceFaceParams", - "properties": { - "age": { - "description": "The age of the face. Range [0.0, 120.0].", - "format": "float", - "type": "number" - }, - "angerProbability": { - "format": "float", - "type": "number" - }, - "attribute": { - "description": "Attributes for the detected face. Information returned or stored in this message may be sensitive from a privacy, policy, or legal point of view. 
Clients should consult with their p-counsels and the privacy working group (go/pwg) to make sure their use respects Google policies.", - "items": { - "$ref": "HumanSensingFaceAttribute" - }, - "type": "array" - }, - "beardProbability": { - "format": "float", - "type": "number" - }, - "blurredProbability": { - "format": "float", - "type": "number" - }, - "boundingBox": { - "$ref": "PhotosVisionServiceFaceFaceParamsBoundingBox", - "description": "Bounding box around the face. The coordinates of the bounding box are in the original image's scale as returned in ImageParams. The bounding box is computed to \"frame\" the face as a human would expect, and is typically used in UI (e.g. G+ to show circles around detected faces). It is based on the landmarker results." - }, - "darkGlassesProbability": { - "format": "float", - "type": "number" - }, - "detectionConfidence": { - "description": "Confidence is in the range [0,1].", - "format": "float", - "type": "number" - }, - "extendedLandmarks": { - "items": { - "$ref": "PhotosVisionServiceFaceFaceParamsExtendedLandmark" - }, - "type": "array" - }, - "eyesClosedProbability": { - "format": "float", - "type": "number" - }, - "face2cartoonResults": { - "$ref": "ResearchVisionFace2cartoonFace2CartoonResults", - "description": "Attributes of the detected face useful for generating a cartoon version of the face." - }, - "faceCropV8": { - "$ref": "PhotosVisionServiceFaceFaceParamsFaceCropV8" - }, - "fdBoundingBox": { - "$ref": "PhotosVisionServiceFaceFaceParamsBoundingBox", - "description": "This other bounding box is tighter than the previous one, and encloses only the skin part of the face. It is typically used to eliminate the face from any image analysis that looks up the \"amount of skin\" visible in an image (e.g. safesearch content score). It is not based on the landmarker results, just on the initial face detection, hence the 'fd' prefix." - }, - "femaleProbability": { - "description": "Probability is in the range [0,1].", - "format": "float", - "type": "number" - }, - "frontalGazeProbability": { - "format": "float", - "type": "number" - }, - "glassesProbability": { - "format": "float", - "type": "number" - }, - "headwearProbability": { - "format": "float", - "type": "number" - }, - "imageParams": { - "$ref": "PhotosVisionServiceFaceImageParams", - "description": "A copy of the 'image_params' field that is also returned as part of the ExtractFacesReply. It contains the with and height of the image the face extraction was performed on and provides the original frame of reference for the bounding boxes above." - }, - "joyProbability": { - "format": "float", - "type": "number" - }, - "landmarkPositions": { - "items": { - "$ref": "PhotosVisionServiceFaceFaceParamsLandmarkPosition" - }, - "type": "array" - }, - "landmarkingConfidence": { - "format": "float", - "type": "number" - }, - "leftEyeClosedProbability": { - "format": "float", - "type": "number" - }, - "longHairProbability": { - "format": "float", - "type": "number" - }, - "mouthOpenProbability": { - "format": "float", - "type": "number" - }, - "nonHumanProbability": { - "format": "float", - "type": "number" - }, - "panAngle": { - "description": "Yaw angle. Indicates how much leftward/rightward the face is pointing relative to the vertical plane perpendicular to the image. 
Range [-180,180].", - "format": "float", - "type": "number" - }, - "poseMatrix": { - "$ref": "PhotosVisionServiceFaceFaceParamsPoseMatrix" - }, - "pretemplate": { - "format": "byte", - "type": "string" - }, - "qualityScore": { - "description": "A score produced by the Face Quality Scoring Module that indicates overall quality of the face and its relative suitability for using it in conjunction with face recognition for instance. As such, the score predicts the likelihood to recognize a given face correctly. A face recognition client could use the score and a threshold to determine whether to use the face in a face model, or whether to even consider it for recognition.", - "format": "float", - "type": "number" - }, - "rightEyeClosedProbability": { - "format": "float", - "type": "number" - }, - "rollAngle": { - "description": "Roll angle indicates how much clockwise/anti-clockwise the face is rotated relative to the image vertical and about the axis perpendicular to the face. Range [-180,180].", - "format": "float", - "type": "number" - }, - "signature": { - "deprecated": true, - "description": "Deprecated: signature will continue to be used for the pre-1.7 SDK template format typically created by the converter module CNVprec_461. All newer templates created with CNVprec_465 or later will use the repeated 'versioned_signatures' field to store the templates and version info.", - "format": "byte", - "type": "string" - }, - "skinBrightnessProbability": { - "format": "float", - "type": "number" - }, - "sorrowProbability": { - "format": "float", - "type": "number" - }, - "surpriseProbability": { - "format": "float", - "type": "number" - }, - "tiltAngle": { - "description": "Pitch angle. Indicates how much upwards/downwards the face is pointing relative to the image's horizontal plane. Range [-180,180].", - "format": "float", - "type": "number" - }, - "underExposedProbability": { - "format": "float", - "type": "number" - }, - "versionedSignatures": { - "items": { - "$ref": "PhotosVisionServiceFaceVersionedFaceSignature" - }, - "type": "array" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsBoundingBox": { - "id": "PhotosVisionServiceFaceFaceParamsBoundingBox", - "properties": { - "x1": { - "description": "These coordinates are in the same scale as the original image. 0 <= x < width, 0 <= y < height.", - "format": "int32", - "type": "integer" - }, - "x2": { - "format": "int32", - "type": "integer" - }, - "y1": { - "format": "int32", - "type": "integer" - }, - "y2": { - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsExtendedLandmark": { - "description": "Below is the set of extended landmarks added by LMprec_508 and 510. 
All future additional landmarks should be added to this message.", - "id": "PhotosVisionServiceFaceFaceParamsExtendedLandmark", - "properties": { - "id": { - "enum": [ - "NOSE_BOTTOM_RIGHT", - "NOSE_BOTTOM_LEFT", - "NOSE_BOTTOM_CENTER", - "LEFT_EYE_TOP_BOUNDARY", - "LEFT_EYE_RIGHT_CORNER", - "LEFT_EYE_BOTTOM_BOUNDARY", - "LEFT_EYE_LEFT_CORNER", - "RIGHT_EYE_TOP_BOUNDARY", - "RIGHT_EYE_RIGHT_CORNER", - "RIGHT_EYE_BOTTOM_BOUNDARY", - "RIGHT_EYE_LEFT_CORNER", - "LEFT_EYEBROW_UPPER_MIDPOINT", - "RIGHT_EYEBROW_UPPER_MIDPOINT", - "LEFT_EAR_TRAGION", - "RIGHT_EAR_TRAGION", - "LEFT_EYE_PUPIL", - "RIGHT_EYE_PUPIL", - "FOREHEAD_GLABELLA", - "CHIN_GNATHION", - "CHIN_LEFT_GONION", - "CHIN_RIGHT_GONION", - "LEFT_CHEEK_CENTER", - "RIGHT_CHEEK_CENTER", - "UNKNOWN_LANDMARK" - ], - "enumDescriptions": [ - "", - "", - "The following landmark is available with LMprec_508 and later", - "The following landmarks are extracted by LMprec_510 and later. See also documentation at www/~jsteffens/no_crawl/doc/FaceDetection/LM510.pdf", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "The following landmarks are extracted by LMprec_600 and later. See go/facesdk.", - "", - "Reserved id for an unknown landmark. This matches the id reserved by the core SDK for an external UNKNOWN landmark." - ], - "type": "string" - }, - "x": { - "description": "NOTE that landmark positions may fall outside the bounds of the image when the face is near one or more edges of the image. That is, it is NOT guaranteed that 0 <= x < width or 0 <= y < height. Rounded version of x_f.", - "format": "int32", - "type": "integer" - }, - "xF": { - "format": "float", - "type": "number" - }, - "y": { - "description": "Rounded version of y_f.", - "format": "int32", - "type": "integer" - }, - "yF": { - "format": "float", - "type": "number" - }, - "z": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsFaceCropV8": { - "description": "Information defining a FaceCrop for a particular face. See go/on-device-face-grouping-face-crops for more details.", - "id": "PhotosVisionServiceFaceFaceParamsFaceCropV8", - "properties": { - "centerX": { - "description": "The X coordinate of the center of the face crop.", - "format": "float", - "type": "number" - }, - "centerY": { - "description": "The Y coordinate of the center of the face crop.", - "format": "float", - "type": "number" - }, - "rotation": { - "description": "Rotation of the face crop, in radians.", - "format": "float", - "type": "number" - }, - "scale": { - "description": "Scale to apply to the coordinates of the face crop.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsLandmarkPosition": { - "id": "PhotosVisionServiceFaceFaceParamsLandmarkPosition", - "properties": { - "landmark": { - "description": "Some landmarks are set during face finding and some are set during landmark finding. 
Only after landmarking will all landmarks be set.", - "enum": [ - "LEFT_EYE", - "RIGHT_EYE", - "LEFT_OF_LEFT_EYEBROW", - "RIGHT_OF_LEFT_EYEBROW", - "LEFT_OF_RIGHT_EYEBROW", - "RIGHT_OF_RIGHT_EYEBROW", - "MIDPOINT_BETWEEN_EYES", - "NOSE_TIP", - "UPPER_LIP", - "LOWER_LIP", - "MOUTH_LEFT", - "MOUTH_RIGHT", - "MOUTH_CENTER", - "DEPRECATED_NOSE_BOTTOM_RIGHT", - "DEPRECATED_NOSE_BOTTOM_LEFT", - "DEPRECATED_NOSE_BOTTOM_CENTER", - "DEPRECATED_LEFT_EYE_TOP_BOUNDARY", - "DEPRECATED_LEFT_EYE_RIGHT_CORNER", - "DEPRECATED_LEFT_EYE_BOTTOM_BOUNDARY", - "DEPRECATED_LEFT_EYE_LEFT_CORNER", - "DEPRECATED_RIGHT_EYE_TOP_BOUNDARY", - "DEPRECATED_RIGHT_EYE_RIGHT_CORNER", - "DEPRECATED_RIGHT_EYE_BOTTOM_BOUNDARY", - "DEPRECATED_RIGHT_EYE_LEFT_CORNER", - "DEPRECATED_LEFT_EYEBROW_UPPER_MIDPOINT", - "DEPRECATED_RIGHT_EYEBROW_UPPER_MIDPOINT", - "DEPRECATED_LEFT_EAR_TRAGION", - "DEPRECATED_RIGHT_EAR_TRAGION", - "DEPRECATED_FOREHEAD_GLABELLA", - "DEPRECATED_CHIN_GNATHION", - "DEPRECATED_CHIN_LEFT_GONION", - "DEPRECATED_CHIN_RIGHT_GONION", - "DEPRECATED_UNKNOWN_LANDMARK" - ], - "enumDeprecated": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true - ], - "enumDescriptions": [ - "Left and right are as viewed in the image without considering mirror projection typical in photos. So LEFT_EYE is typically the person's right eye. For convenience and consistency the enum values mirror the corresponding values defined by the Neven Vision SDK. See landmark table at: wiki/twiki/bin/view/Main/FRSDKLandmarkPositions The following landmarks are extracted by LMprec_502 and later", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "All values below are deprecated. Please use ExtendedLandmark to use them.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "x": { - "description": "NOTE that landmark positions may fall outside the bounds of the image when the face is near one or more edges of the image. That is, it is NOT guaranteed that 0 <= x < width or 0 <= y < height. Rounded version of x_f.", - "format": "int32", - "type": "integer" - }, - "xF": { - "format": "float", - "type": "number" - }, - "y": { - "description": "Rounded version of y_f.", - "format": "int32", - "type": "integer" - }, - "yF": { - "format": "float", - "type": "number" - }, - "z": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsPoseMatrix": { - "description": "Stores the full pose transformation matrix of the detected face. 
From this the roll, pan, tilt angles can be computed.", - "id": "PhotosVisionServiceFaceFaceParamsPoseMatrix", - "properties": { - "xx": { - "format": "float", - "type": "number" - }, - "xy": { - "format": "float", - "type": "number" - }, - "xz": { - "format": "float", - "type": "number" - }, - "yx": { - "format": "float", - "type": "number" - }, - "yy": { - "format": "float", - "type": "number" - }, - "yz": { - "format": "float", - "type": "number" - }, - "zx": { - "format": "float", - "type": "number" - }, - "zy": { - "format": "float", - "type": "number" - }, - "zz": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceImageParams": { - "description": "ImageParams are a collection of parameters of the image on which face detection was performed.", - "id": "PhotosVisionServiceFaceImageParams", - "properties": { - "height": { - "format": "int32", - "type": "integer" - }, - "width": { - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceVersionedFaceSignature": { - "description": "From newer SDK versions onward (1.7+), each face template (signature) will also store a version # derived from the converter version that created the template.", - "id": "PhotosVisionServiceFaceVersionedFaceSignature", - "properties": { - "confidence": { - "description": "Confidence score based on embedding uncertainty. This is populated if fetch_facenet_confidence has been set as true in FaceNetConfig, and FaceNet version satisfies one of the following: 1. FACENET_8. 2. FACENET_9 with confidence model enabled in FaceTemplatesConfig. If face_embedding_confidence module is requested, this will also be populated, and the signature will be empty.", - "format": "float", - "type": "number" - }, - "confidenceVersion": { - "description": "The Confidence version that populated the confidence.", - "enum": [ - "EMBEDDING_CONFIDENCE_VERSION_UNSPECIFIED", - "VERSION_1", - "VERSION_2" - ], - "enumDescriptions": [ - "", - "Corresponds to VSSV1DNormTfLiteClient. Regions without an embedding confidence version should be assumed to have this version.", - "Corresponds to AAV2DNorm. This is an animal-aware version with scores compatible with VERSION_1." - ], - "type": "string" - }, - "converterVersion": { - "description": "The converter version that created this template.", - "enum": [ - "UNKNOWN", - "PREC_461", - "PREC_465", - "PREC_470", - "FACENET_7", - "FACENET_8", - "FACENET_CELEBRITY", - "FACENET_9", - "FACENET_9_TPU", - "FACENET_MOBILE_V1_8BITS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "signature": { - "description": "The face template bytes.", - "format": "byte", - "type": "string" - }, - "signatureSource": { - "description": "Specifies the source of the signature in cases where the bytes are from a lower level of the FaceNet architecture. This is useful in combination with the FaceNetClient when it returns multiple outputs and we need to keep track of their contents. For example, this could contain the string 'avgpool-0' while another instance can use the standard 'normalizing' string.", - "type": "string" - }, - "version": { - "description": "The internal version of the template. This is a copy of the version stored within the template.", - "format": "uint32", - "type": "integer" - } - }, - "type": "object" - }, "PornFlagData": { "description": "A protocol buffer to store the url, referer and porn flag for a url. and an optional image score. 
Next available tag id: 51.", "id": "PornFlagData", @@ -100776,7 +100059,7 @@ "type": "object" }, "QualityNavboostCrapsCrapsData": { - "description": "NEXT TAG: 27", + "description": "NEXT TAG: 28", "id": "QualityNavboostCrapsCrapsData", "properties": { "agingCounts": { @@ -100872,6 +100155,11 @@ }, "url": { "type": "string" + }, + "voterTokenCount": { + "description": "The number of distinct voter tokens (a lower bound on the number of distinct users that contributed to the entry, used for privacy-related filtering).", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -100937,6 +100225,10 @@ "signals": { "$ref": "QualityNavboostCrapsCrapsClickSignals", "description": "CRAPS Signals for the locale." + }, + "voterTokenBitmap": { + "$ref": "QualityNavboostGlueVoterTokenBitmapMessage", + "description": "The set of voter tokens of the sessions that contributed to this feature's stats. Voter tokens are not unique per user, so it is a lower bound on the number of distinct users. Used for privacy-related filtering." } }, "type": "object" @@ -101007,6 +100299,24 @@ }, "type": "object" }, + "QualityNavboostGlueVoterTokenBitmapMessage": { + "description": "Used for aggregating query unique voter_token during merging. We use 4 uint64(s) as a 256-bit bitmap to aggregate distinct voter_tokens in Glue model pipeline. Number of elements should always be either 0 or 4. As an optimization, we store the voter_token as a single uint64 if only one bit is set. See quality/navboost/speedy_glue/util/voter_token_bitmap.h for the class that manages operations on these bitmaps.", + "id": "QualityNavboostGlueVoterTokenBitmapMessage", + "properties": { + "subRange": { + "items": { + "format": "uint64", + "type": "string" + }, + "type": "array" + }, + "voterToken": { + "format": "uint64", + "type": "string" + } + }, + "type": "object" + }, "QualityNsrExperimentalNsrTeamData": { "description": "Experimental NsrTeam data. This is a proto containing versioned signals which can be used to run live experiments. This proto will not be propagated to MDU shards, but it will be populated at query time by go/web-signal-joins inside the CompressedQualitySignals subproto of PerDocData proto. See go/0DayLEs for the design doc. Note how this is only meant to be used during LEs, it should *not* be used for launches.", "id": "QualityNsrExperimentalNsrTeamData", @@ -101136,7 +100446,7 @@ "type": "object" }, "QualityNsrNsrData": { - "description": "NOTE: When adding a new field to be propagated to Raffia check if NsrPatternSignalSpec needs to be updated. Next ID: 54", + "description": "NOTE: When adding a new field to be propagated to Raffia check if NsrPatternSignalSpec needs to be updated. Next ID: 55", "id": "QualityNsrNsrData", "properties": { "articleScore": { @@ -101340,6 +100650,11 @@ "format": "float", "type": "number" }, + "smallPersonalSite": { + "description": "Score of small personal site promotion go/promoting-personal-blogs-v1", + "format": "float", + "type": "number" + }, "spambrainLavcScore": { "deprecated": true, "description": "The SpamBrain LAVC score, as of July 2022. See more information at go/cloverfield-lavc-deck.", @@ -104252,7 +103567,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -104261,7 +103577,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. 
See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" } @@ -104479,7 +103796,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -104488,7 +103806,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" }, @@ -105895,11 +105214,6 @@ "format": "uint64", "type": "string" }, - "productPopularity": { - "description": "Organic product popularity.", - "format": "double", - "type": "number" - }, "relevanceEmbedding": { "description": "Relevance embedding from ShoppingAnnotation.Product", "items": { @@ -107369,20 +106683,6 @@ }, "type": "object" }, - "ReneFaceResponse": { - "description": "The output of the face recognition signal.", - "id": "ReneFaceResponse", - "properties": { - "faces": { - "description": "Recognized faces in the image.", - "items": { - "$ref": "PhotosVisionServiceFaceFaceParams" - }, - "type": "array" - } - }, - "type": "object" - }, "RepositoryAnnotationsGeoTopic": { "description": "GeoTopicality of a document is a set of GeoTopics ordered by their normalized scores.", "id": "RepositoryAnnotationsGeoTopic", @@ -111160,16 +110460,6 @@ "$ref": "RepositoryWebrefSubSegmentIndex", "description": "Identifies the sub-segment where the annotation occurs. See SubSegmentIndex for details. Not present in QRef, also deprecated for URL segment types." }, - "timeOffsetConfidence": { - "description": "Confidence for the time_offset_ms annotation, quantized to values in range 0-127 (see speech::VideoASRServerUtil::ConfidenceQuantize for how the quantization was done). Confidence can be empty for special characters (e.g. spaces).", - "format": "int32", - "type": "integer" - }, - "timeOffsetMs": { - "description": "Timestamp that this mention appeared in the video. The field is only populated for VIDEO_TRANSCRIPT when the byte offset is the same. It is extracted from cdoc.doc_videos.content_based_metadata.transcript_asr.transcript.timestamp.", - "format": "int32", - "type": "integer" - }, "trustedNameConfidence": { "description": "Confidence that this name is a trusted name of the entity. This is set only in case the confidence is higher than an internal threshold (see ConceptProbability).", "format": "float", @@ -112897,10 +112187,6 @@ }, "webrefOutlinkInfos": { "$ref": "RepositoryWebrefWebrefOutlinkInfos" - }, - "webrefOutlinksLegacy": { - "$ref": "Proto2BridgeMessageSet", - "deprecated": true } }, "type": "object" @@ -114346,10 +113632,6 @@ "$ref": "Proto2BridgeMessageSet", "description": "Optional extensions (e.g. taxonomic classifications)." 
}, - "outlinkInfos": { - "$ref": "RepositoryWebrefWebrefOutlinkInfos", - "description": "Information about the outlinks of this document. " - }, "webrefParsedContentSentence": { "description": "The content (CONTENT section 0) as parsed by WebrefParser. Only used by //r/w/postprocessing/idf/idf-pipeline for document ngram idf computation. Populated when the annotator is run with webref_populate_parsed_content Each webref_parsed_content_sentence represents one sentence of the context where saft annotations were used to determine the sentence boundaries. See r/w/universal/processors/saft/saft-sentence-helper.h for details.", "items": { @@ -116503,961 +115785,6 @@ }, "type": "object" }, - "ResearchVisionFace2cartoonAgeClassifierResults": { - "id": "ResearchVisionFace2cartoonAgeClassifierResults", - "properties": { - "age": { - "enum": [ - "UNKNOWN", - "BABY", - "KID", - "ADULT", - "OLD" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "predictedAge": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonChinLengthClassifierResults": { - "id": "ResearchVisionFace2cartoonChinLengthClassifierResults", - "properties": { - "chinLength": { - "enum": [ - "UNKNOWN", - "SHORT", - "AVERAGE", - "LONG" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "confidence": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeColorClassifierResults": { - "id": "ResearchVisionFace2cartoonEyeColorClassifierResults", - "properties": { - "color": { - "enum": [ - "UNKNOWN", - "BROWN_OR_BLACK", - "BLUE_OR_GREEN" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "confidence": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyeEyebrowDistance": { - "enum": [ - "UNKNOWN", - "SMALL", - "AVERAGE", - "LARGE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeShapeClassifierResults": { - "id": "ResearchVisionFace2cartoonEyeShapeClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "shape": { - "enum": [ - "UNKNOWN", - "DOUBLE_FOLD_EYELID", - "SINGLE_FOLD_EYELID" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeSlantClassifierResults": { - "id": "ResearchVisionFace2cartoonEyeSlantClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyeSlant": { - "enum": [ - "UNKNOWN", - "OUTWARDS", - "AVERAGE", - "INWARDS" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults", - 
"properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyeVerticalPosition": { - "enum": [ - "UNKNOWN", - "HIGH", - "AVERAGE", - "LOW" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyebrowShapeClassifierResults": { - "id": "ResearchVisionFace2cartoonEyebrowShapeClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyebrowShape": { - "enum": [ - "UNKNOWN", - "ST_BREAK", - "ST_BEND", - "HIGH_DIAGONAL", - "TILT", - "ROUND", - "ANGULAR", - "HIGH_CURVY", - "ROUND_UNEVEN", - "BUSHY_ST", - "UNI" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyebrowThicknessClassifierResults": { - "id": "ResearchVisionFace2cartoonEyebrowThicknessClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyebrowThickness": { - "enum": [ - "UNKNOWN", - "THIN", - "NORMAL", - "THICK", - "VERY_THICK" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyebrowWidthClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonEyebrowWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyebrowWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonFace2CartoonResults": { - "description": "Results of the Face2Cartoon pipeline.", - "id": "ResearchVisionFace2cartoonFace2CartoonResults", - "properties": { - "ageClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonAgeClassifierResults" - }, - "type": "array" - }, - "chinLengthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonChinLengthClassifierResults" - }, - "type": "array" - }, - "eyeColorClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeColorClassifierResults" - }, - "type": "array" - }, - "eyeEyebrowDistanceClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults" - }, - "type": "array" - }, - "eyeShapeClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeShapeClassifierResults" - }, - "type": "array" - }, - "eyeSlantClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeSlantClassifierResults" - }, - "type": "array" - }, - "eyeVerticalPositionClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults" - }, - "type": "array" - }, - "eyebrowShapeClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyebrowShapeClassifierResults" - }, - "type": "array" - }, - "eyebrowThicknessClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyebrowThicknessClassifierResults" - }, - "type": "array" - }, - "eyebrowWidthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyebrowWidthClassifierResults" - }, - "type": "array" - }, - "faceWidthClassifierResults": { - "items": { - "$ref": 
"ResearchVisionFace2cartoonFaceWidthClassifierResults" - }, - "type": "array" - }, - "facialHairClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonFacialHairClassifierResults" - }, - "type": "array" - }, - "genderClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonGenderClassifierResults" - }, - "type": "array" - }, - "glassesClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonGlassesClassifierResults" - }, - "type": "array" - }, - "hairColorClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonHairColorClassifierResults" - }, - "type": "array" - }, - "hairStyleClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonHairStyleClassifierResults" - }, - "type": "array" - }, - "interEyeDistanceClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonInterEyeDistanceClassifierResults" - }, - "type": "array" - }, - "jawShapeClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonJawShapeClassifierResults" - }, - "type": "array" - }, - "lipThicknessClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonLipThicknessClassifierResults" - }, - "type": "array" - }, - "mouthVerticalPositionClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults" - }, - "type": "array" - }, - "mouthWidthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonMouthWidthClassifierResults" - }, - "type": "array" - }, - "noseVerticalPositionClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults" - }, - "type": "array" - }, - "noseWidthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonNoseWidthClassifierResults" - }, - "type": "array" - }, - "skinToneClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonSkinToneClassifierResults" - }, - "type": "array" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonFaceWidthClassifierResults": { - "id": "ResearchVisionFace2cartoonFaceWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "faceWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonFacialHairClassifierResults": { - "id": "ResearchVisionFace2cartoonFacialHairClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "facialHair": { - "enum": [ - "UNKNOWN", - "NO_FACIAL_HAIR", - "CLOSE_SHAVE", - "SHORT_BEARD_2", - "SHORT_BEARD_1", - "MED_BEARD", - "SHORT_BEARD_5", - "GOATEE", - "MOUSTACHE", - "MOUSTACHE_GOATEE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonGenderClassifierResults": { - "id": "ResearchVisionFace2cartoonGenderClassifierResults", - "properties": { - "confidence": { - "description": "Uses a scaled version of the FaceSDK classifier's probability as the confidence (since the probability for the selected gender is between (0.5, 1] we scale it to be between (0, 1]).", - "format": "float", - "type": "number" - }, - "gender": { - "enum": [ - "UNKNOWN", - "FEMALE", - "MALE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonGlassesClassifierResults": { - 
"id": "ResearchVisionFace2cartoonGlassesClassifierResults", - "properties": { - "confidence": { - "description": "Uses a scaled version of the FaceSDK classifier's probability as the confidence (since the probability for the selected glasses is between (0.5, 1] we scale it to be between (0, 1]).", - "format": "float", - "type": "number" - }, - "glassesType": { - "enum": [ - "UNKNOWN", - "NO_GLASSES", - "GLASSES", - "DARK_GLASSES" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonHairColorClassifierResults": { - "id": "ResearchVisionFace2cartoonHairColorClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "hairColor": { - "enum": [ - "UNKNOWN", - "BLACK", - "DARK_BROWN", - "LIGHT_BROWN", - "AUBURN", - "ORANGE", - "STRAWBERRY_BLONDE", - "DIRTY_BLONDE", - "BLEACHED_BLONDE", - "GREY", - "WHITE", - "MINT", - "PALE_PINK", - "LAVENDER", - "TEAL", - "PURPLE", - "PINK", - "BLUE", - "GREEN" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonHairStyleClassifierResults": { - "id": "ResearchVisionFace2cartoonHairStyleClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "hairStyle": { - "enum": [ - "UNKNOWN", - "BALD_1", - "BALD_2", - "BALD_3", - "SHAVE_1", - "FRONT_CREW_1", - "SHORT_STRAIGHT_9", - "SHORT_STRAIGHT_17", - "BUN_1", - "SHORT_STRAIGHT_2", - "SHORT_STRAIGHT_10", - "SHORT_STRAIGHT_1", - "SHORT_STRAIGHT_19", - "SHORT_STRAIGHT_4", - "SHORT_STRAIGHT_20", - "SHORT_STRAIGHT_18", - "SHORT_STRAIGHT_11", - "MEDIUM_STRAIGHT_5", - "MEDIUM_STRAIGHT_6", - "MEDIUM_STRAIGHT_3", - "LONG_STRAIGHT_6", - "LONG_STRAIGHT_4", - "LONG_STRAIGHT_2", - "LONG_STRAIGHT_PONYTAIL_2", - "LONG_STRAIGHT_PONYTAIL_1", - "SHORT_WAVY_2", - "MEDIUM_WAVY_1", - "MEDIUM_WAVY_4", - "MEDIUM_WAVY_2", - "LONG_WAVY_1", - "LONG_WAVY_3", - "LONG_WAVY_2", - "LONG_WAVY_4", - "LONG_WAVY_PONYTAIL_4", - "SHORT_CURLY_6", - "SHORT_CURLY_5", - "MEDIUM_CURLY_3", - "SHORT_CURLY_8", - "MEDIUM_CURLY_4", - "LONG_CURLY_3", - "LONG_CURLY_1", - "LONG_CURLY_5", - "LONG_CURLY_4", - "LONG_CURLY_2", - "LONG_CURLY_PONYTAIL_1", - "SHORT_COILY_1", - "SHORT_COILY_5", - "SHORT_COILY_4", - "SHORT_COILY_2", - "MEDIUM_COILY_1", - "LONG_COILY_2", - "LONG_COILY_PONYTAIL_1", - "SHORT_COILY_3", - "LONG_COILY_1", - "BOX_BRAIDS", - "BUN_2", - "COILY_PONYTAIL", - "LONG_COILY_3", - "LONG_COILY_4", - "LONG_COILY_5", - "LONG_COILY_PONYTAIL", - "OTT", - "SHORT_CORNROWS", - "TIGHT_BRAID", - "TIGHT_BRAIDS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonInterEyeDistanceClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonInterEyeDistanceClassifierResults", - "properties": { - "confidence": { - 
"format": "float", - "type": "number" - }, - "interEyeDistance": { - "enum": [ - "UNKNOWN", - "WIDE", - "AVERAGE", - "CLOSE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonJawShapeClassifierResults": { - "id": "ResearchVisionFace2cartoonJawShapeClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "jawShape": { - "enum": [ - "UNKNOWN", - "TRIANGLE", - "OVAL", - "SQUARE", - "ROUND" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonLipThicknessClassifierResults": { - "id": "ResearchVisionFace2cartoonLipThicknessClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "lipThickness": { - "enum": [ - "UNKNOWN", - "THIN", - "AVERAGE", - "THICK" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "mouthVerticalPosition": { - "enum": [ - "UNKNOWN", - "HIGH", - "AVERAGE", - "LOW" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonMouthWidthClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonMouthWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "mouthWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "noseVerticalPosition": { - "enum": [ - "UNKNOWN", - "HIGH", - "AVERAGE", - "LOW" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonNoseWidthClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonNoseWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "noseWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonSkinToneClassifierResults": { - "id": "ResearchVisionFace2cartoonSkinToneClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": 
"number" - }, - "skinToneType": { - "enum": [ - "UNKNOWN", - "TYPE_1", - "TYPE_2", - "TYPE_3", - "TYPE_4", - "TYPE_5", - "TYPE_6", - "TYPE_7", - "TYPE_8", - "TYPE_9", - "TYPE_10", - "TYPE_11" - ], - "enumDescriptions": [ - "See the images from the links at: https://storage.googleapis.com/cc_8e814306-f840-4e2e-9415-36b06251cf8e/ skin_tone_exemplars/skin-*.png", - "(darkest) RGB: #603d30", - "RGB: #88594b", - "RGB: #aa7454", - "RGB: #c78b5d", - "RGB: #d9a16e", - "RGB: #e3b47e", - "RGB: #eeaf94", - "RGB: #f0c092", - "RGB: #f6d8c1", - "RGB: #fbcdb6", - "(lightest) RGB: #fbdbd1" - ], - "type": "string" - } - }, - "type": "object" - }, "RichsnippetsDataObject": { "description": "Next ID: 11", "id": "RichsnippetsDataObject", @@ -125750,7 +124077,7 @@ "type": "object" }, "TrawlerFetchReplyData": { - "description": "Fetcher -> FetchClient FetchReplyData is the metadata for a reply from a FetchRequest. For metadata + document body, FetchReply is further below. NOTE: FetchReplyData (and FetchReply) is the output interface from Multiverse. Teams outside Multiverse/Trawler should not create fake FetchReplies. Trawler: When adding new fields here, it is recommended that at least the following be rebuilt and pushed: - cron_fetcher_index mapreduces: so that UrlReplyIndex, etc. retain the new fields - tlookup, tlookup_server: want to be able to return the new fields - logviewer, fetchutil: annoying to get back 'tag88:' in results -------------------------- Next Tag: 124 -----------------------", + "description": "Fetcher -> FetchClient FetchReplyData is the metadata for a reply from a FetchRequest. For metadata + document body, FetchReply is further below. NOTE: FetchReplyData (and FetchReply) is the output interface from Multiverse. Teams outside Multiverse/Trawler should not create fake FetchReplies. Trawler: When adding new fields here, it is recommended that at least the following be rebuilt and pushed: - cron_fetcher_index mapreduces: so that UrlReplyIndex, etc. retain the new fields - tlookup, tlookup_server: want to be able to return the new fields - logviewer, fetchutil: annoying to get back 'tag88:' in results -------------------------- Next Tag: 125 -----------------------", "id": "TrawlerFetchReplyData", "properties": { "BadSSLCertificate": { @@ -126142,6 +124469,9 @@ "The context of refresh crawl is that client needs to check the content of some URLs periodically, so they refresh those URLs regularly." ], "type": "string" + }, + "webioInfo": { + "$ref": "TrawlerFetchReplyDataWebIOInfo" } }, "type": "object" @@ -127010,6 +125340,34 @@ }, "type": "object" }, + "TrawlerFetchReplyDataWebIOInfo": { + "description": "WebIO is the new hostload model introduced in 2023. 
It measures the occupancy of 1 outgoing fetch connection for 1 minute.", + "id": "TrawlerFetchReplyDataWebIOInfo", + "properties": { + "webio": { + "format": "float", + "type": "number" + }, + "webioPercentageTier": { + "enum": [ + "WEBIO_TIER_1", + "WEBIO_TIER_2", + "WEBIO_TIER_3", + "WEBIO_TIER_4", + "WEBIO_NUM_TIERS" + ], + "enumDescriptions": [ + "Utilization 90-100%", + "Utilization 70%-90%", + "Utilization 30%-70%", + "Utilization 0%-30%", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "TrawlerFetchStatus": { "id": "TrawlerFetchStatus", "properties": { @@ -128835,6 +127193,9 @@ "MULTILINE_SUBSCRIPTION_ADDON_TITLE_SESSION_LEVEL", "PAYTM_WALLET_FAILURE_SESSION_LEVEL", "CART_ABANDONMENT_SUBSCRIPTION_BENEFITS_SESSION_LEVEL_V2", + "MULTILINE_SUBSCRIPTION_BASIC_RESTORE_ENABLED_SESSION_LEVEL", + "DECLINE_MESSAGE_IN_SUBSCENTER_FIX_FLOW_SESSION_LEVEL_V1", + "SAVE_FOR_LATER_CART_ABANDONMENT_SCREEN_SESSION_LEVEL", "SESSION_LEVEL_TEST_CODE_LIMIT", "CART_ABANDONMENT_USER_LEVEL", "IN_APP_PRODUCTS_IN_DETAILS_PAGE_USER_LEVEL", @@ -129047,12 +127408,7 @@ "HAS_MONETIZATION_BEHAVIOR_LAST_180D_USER_LEVEL", "HAS_LAST_28D_CART_ABANDONMENT_USER_LEVEL", "HAS_LAST_7D_CART_ABANDONMENT_USER_LEVEL", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V2", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_1", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_2", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_3", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_4", + "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_5", "POST_SUCCESS_ADD_BACKUP_FLOW_USER_LEVEL", "SKIP_CHECK_MARK_SCREEN_WITH_BACKUP_FLOW_USER_LEVEL", "IS_ELIGIBLE_FOR_ONE_CLICK_BACKUP_FOP_USER_LEVEL", @@ -129199,6 +127555,9 @@ "UNIFIED_ITEM_RECOMMENDATION_LOWER_PRICED_USER_LEVEL", "CART_WITH_BROKEN_FOP_USER_LEVEL", "CART_ABANDONMENT_SUBSCRIPTION_BENEFITS_USER_LEVEL_V2", + "DECLINE_MESSAGE_IN_SUBSCENTER_FIX_FLOW_USER_LEVEL", + "MULTILINE_SUBSCRIPTION_BASIC_RESTORE_ENABLED_USER_LEVEL", + "SAVE_FOR_LATER_CART_ABANDONMENT_SCREEN_USER_LEVEL", "USER_LEVEL_TEST_CODE_LIMIT" ], "enumDeprecated": [ @@ -129696,256 +128055,59 @@ false, false, false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - 
false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, false, false, false, @@ -130176,6 +128338,89 @@ false, false, false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, true, false, false, @@ -130290,6 +128535,68 @@ false, false, false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, false, false, false, @@ -130405,7 +128712,6 @@ false, false, false, - true, false, false, false, @@ -130433,6 +128739,57 @@ false, false, false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, true, false, false, @@ -130578,6 +128935,9 @@ false, false, false, + false, + false, + false, false ], "enumDescriptions": [ @@ -131593,6 +129953,9 @@ "Session-level test code for multiline addon title.", 
"Session-level test code for Paytm wallet failures.", "Session-level for showing subscription benefits in cart abandonment.", + "Session_level test code for multiline basic restore enabled.", + "Session-level test code thst indicates decline message is popluated in subscenter.", + "Session level test code for Save For Later cart abandonment screen.", "", "Cart abandonment flow for purchase flow.", "User saw/would have seen the in app products section in App", @@ -131805,11 +130168,6 @@ "User level test code for users who have made any monetization behavior(sub, iap) for the last 180 days (controlled by ULYSSES_OOP_SPEND_PER_PURCHASE_180D), used for AH/GH monetization experiments.", "User level test code for users who have any purchase card abandon behavior in the last 28 day (controlled by LAST_28D_CART_ABANDONMENT_BACKEND), used for AH/GH monetization experiments.", "User level test code for users who have any purchase card abandon behavior in the last 7 day (controlled by LAST_7D_CART_ABANDONMENT_BACKEND), used for AH/GH monetization experiments.", - "User level test code for link biometrics with impression cap and foped user setup.", - "User level test code for link biometrics with impression cap and foped user setup.", - "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", - "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", - "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", "User level test code for post success add backup flow.", "User level test code for skipping ckechmark screen with backup flow.", @@ -131876,7 +130234,7 @@ "", "", "", - "User level test code for reinstall enablement. If user has any eligible reinstall passing the per user filtering logic, testcode will be logged. Note that the filtering logics are controlled by gcl flags. Ex. Play Games Home: http://shortn/_2aGCRQqToq. This test code only knows if any app passes the filtering but not which filtering params are applied.", + "User level test code for reinstall enablement. If user has any eligible reinstall passing the per user filtering logic, testcode will be logged. Note that the filtering logic are controlled by gcl flags. Ex. Play Games Home: http://shortn/_2aGCRQqToq. This test code only knows if any app passes the filtering but not which filtering params are applied.", "User level test code for tagging users who have any app which is recommended by PRS and has reinstall eligibility when is_app_with_historical_oop_purchase restriction is turned on.", "User-level test code for tagging users with previous OOP spend on games.", "User-level test code for tagging users with previous OOP spend on applications.", @@ -131957,6 +130315,9 @@ "", "User level test code indicating that user starts the purchase with a cart that has broken existing form of payment.", "User-level for showing subscription benefits in cart abandonment.", + "User-level test code for users who see the decline message in subscenter.", + "User level test code for multiline basic restore enabled.", + "User level test code for Save For Later cart abandonment screen. 
Add new user-level TestCode here.", "" ], "type": "string" @@ -132423,7 +130784,8 @@ "NS_SEARCH_SPORTS", "NS_BUSINESSMESSAGING", "NS_AERIAL_VIEW", - "NS_DOCS_FLIX_RENDER" + "NS_DOCS_FLIX_RENDER", + "NS_SHOPPING" ], "enumDeprecated": [ false, @@ -132468,6 +130830,7 @@ false, false, false, + false, false ], "enumDescriptions": [ @@ -132513,7 +130876,8 @@ "Namespace for Search Sports vertical videos.", "Namespace for Business Messaging videos.", "Namespace for Geo Aerial View", - "Namespace for Flix Render (Docs) Please receive approval via go/vp-newclients before adding a new namespace." + "Namespace for Flix Render (Docs)", + "Namespace for CDS videos processed through Amarna. Please receive approval via go/vp-newclients before adding a new namespace." ], "type": "string" } @@ -148987,7 +147351,9 @@ "COUNTERFEIT", "COURT_ORDER", "CTM", + "DANGEROUS", "DEFAMATION", + "EATING_DISORDERS", "GOVERNMENT_ORDER", "HARASSMENT", "HATE", @@ -149003,17 +147369,17 @@ "QUOTA_EXCEEDED", "REGULATED", "SPAM", + "SUICIDE_AND_SELF_HARM", "TRADEMARK", "UNSAFE_RACY", "UNWANTED_SOFTWARE", "UNWANTED_CONTENT", "VIOLENCE", - "DANGEROUS", + "VIOLENT_EXTREMISM", "BLOCKED_LINKS", "BLOCKED_WORDS", "ENABLED_HOLD_ALL", "HIDDEN_USER_LIST", - "VIOLENT_EXTREMISM", "PRIVILEGED_USER_REJECTED", "ABOVE_REJECT_INAPPROPRIATE_SCORE", "TOO_MANY_BAD_CHARS", @@ -149027,7 +147393,9 @@ "Promotion of counterfeit product claims.", "Third-party court orders.", "Circumvention of Technological measures claims. Circumventing protection mechanisms on copyrighted work.", + "Content depicts or provides instructions to complete activities that are dangerous and/or widely illegal, e.g. prostitution, bomb-making, suicide.", "Defamation claims.", + "Content that demonstrates eating disorders", "Government request, regardless of reason.", "Consistent harassing behavior directed towards a person.", "", @@ -149043,17 +147411,17 @@ "", "Contains regulated products and services, such as pharmaceuticals, alcohol, tobacco, etc. For details, https://sites.google.com/a/google.com/crt-policy-site/regulated", "", + "Content that demonstrates suicide and self harm", "Trademark violations where Google could be liable.", "Content that is unsafe because it is sexually suggestive/racy.", "The software is deceptive, promising a value proposition that it does not meet, or tries to trick users into installing it or it piggybacks on the installation of another program, or doesn\u2019t tell the user about all of its principal and significant functions or affects the user\u2019s system in unexpected ways, or collects or transmits private information without the user\u2019s knowledge, or bundled with other software and its presence is not disclosed.", "Content includes spammy commercial content, such as links to MFA pages, affiliate links, ads or solicitation, or otherwise off-topic or irrelevant content.", "", - "Content depicts or provides instructions to complete activities that are dangerous and/or widely illegal, e.g. prostitution, bomb-making, suicide.", + "Content that recruits or solicits terrorists; specific and detailed instructions on how to make a bomb; terrorists who document their attacks; praising acts of mass violence; content that shows captured hostages posted with the intent to solicit demands, threaten, or intimidate.", "Comment contains links in a list of \"blocked links\" in YouTube Studio > Settings > Community.", "Creator setting specific reasons. 
go/ytc-nextgen-community-settings-storage Comment contains words in a list of \"blocked words\" in YouTube Studio > Settings > Community.", "Held because the moderation policy is \"Hold all comments for review\".", "Comment from listed hidden users.", - "Content that recruits or solicits terrorists; specific and detailed instructions on how to make a bomb; terrorists who document their attacks; praising acts of mass violence; content that shows captured hostages posted with the intent to solicit demands, threaten, or intimidate.", "A privileged user, which can only be parent entity owner for ENTITY_COMMENT, but can be either parent entity channel owner or channel moderator for CHAT_MESSAGE CommentType, manually rejected the Comment. Their decision overrides any system flagging.", "Automod rejected due to above inappropriate score rejection threshold. Maps to ModerationReason.ABOVE_REJECT_INAPPROPRIATE_SCORE.", "Automod rejected due to containing more than allowed bad characters. Maps to ModerationReason.TOO_MANY_BAD_CHARS.", From af59b3870e78f4c6841623b8aa68a850902b8f86 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Date: Tue, 31 Oct 2023 14:06:03 +0000 Subject: [PATCH 12/29] feat(dataflow): update the api #### dataflow:v1b3 The following keys were added: - schemas.Job.properties.satisfiesPzi (Total Keys: 2) --- docs/dyn/dataflow_v1b3.projects.jobs.html | 7 +++++++ .../dataflow_v1b3.projects.locations.flexTemplates.html | 1 + docs/dyn/dataflow_v1b3.projects.locations.jobs.html | 6 ++++++ docs/dyn/dataflow_v1b3.projects.locations.templates.html | 2 ++ docs/dyn/dataflow_v1b3.projects.templates.html | 2 ++ .../discovery_cache/documents/dataflow.v1b3.json | 7 ++++++- 6 files changed, 24 insertions(+), 1 deletion(-) diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html index a6b67718200..4b32309260c 100644 --- a/docs/dyn/dataflow_v1b3.projects.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.jobs.html @@ -450,6 +450,7 @@ Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -803,6 +804,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1140,6 +1142,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1484,6 +1487,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1886,6 +1890,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2280,6 +2285,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2611,6 +2617,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html index ff48348ee93..7f83d220197 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html @@ -518,6 +518,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html index 14a22ed084f..8bc7fdb2a57 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html @@ -438,6 +438,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -774,6 +775,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1118,6 +1120,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1628,6 +1631,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2024,6 +2028,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2354,6 +2359,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.templates.html b/docs/dyn/dataflow_v1b3.projects.locations.templates.html index e269bfeeb9d..93ebcf2de23 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.templates.html @@ -435,6 +435,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -913,6 +914,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html index 5b87b4c0768..a72f84cdf09 100644 --- a/docs/dyn/dataflow_v1b3.projects.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.templates.html @@ -434,6 +434,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -912,6 +913,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json index 548877c8889..24431288693 100644 --- a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json +++ b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json @@ -2221,7 +2221,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -3932,6 +3932,11 @@ "$ref": "RuntimeUpdatableParams", "description": "This field may ONLY be modified at runtime using the projects.jobs.update method to adjust job behavior. This field has no effect when specified at job creation." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", "type": "boolean" From 01acfe0787e47037aca16f96dbefc164d449f8ca Mon Sep 17 00:00:00 2001 From: Yoshi AutomationDate: Tue, 31 Oct 2023 14:06:03 +0000 Subject: [PATCH 13/29] feat(dataproc): update the api #### dataproc:v1 The following keys were added: - schemas.GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig (Total Keys: 3) - schemas.ListWorkflowTemplatesResponse.properties.unreachable (Total Keys: 3) - schemas.UsageMetrics.properties.acceleratorType.type (Total Keys: 1) - schemas.UsageMetrics.properties.milliAcceleratorSeconds (Total Keys: 2) - schemas.UsageSnapshot.properties.acceleratorType.type (Total Keys: 1) - schemas.UsageSnapshot.properties.milliAccelerator (Total Keys: 2) - schemas.WorkflowTemplate.properties.encryptionConfig.$ref (Total Keys: 1) --- ...ataproc_v1.projects.locations.batches.html | 12 +++++ ...taproc_v1.projects.locations.sessions.html | 12 +++++ ....projects.locations.workflowTemplates.html | 24 +++++++++ ...v1.projects.regions.workflowTemplates.html | 24 +++++++++ .../documents/dataproc.v1.json | 49 +++++++++++++++++-- 5 files changed, 118 insertions(+), 3 deletions(-) diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html index 5b6849248c0..89ec5d48ffb 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.html @@ -167,10 +167,14 @@ Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -356,10 +360,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -503,10 +511,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.html b/docs/dyn/dataproc_v1.projects.locations.sessions.html index e210b0d9d25..a6a74d771d6 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.html @@ -155,10 +155,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -312,10 +316,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -409,10 +417,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html index fad3a29cc5b..90cc8926789 100644 --- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html @@ -128,6 +128,9 @@Method Details
{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -802,6 +805,9 @@Method Details
{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -1503,6 +1509,9 @@Method Details
{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -2268,6 +2277,9 @@Method Details
{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -2983,6 +2995,9 @@Method Details
{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -3646,6 +3661,9 @@Method Details
"version": 42, # Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. }, ], + "unreachable": [ # Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response. + "A String", + ], }
getConfig(name, x__xgafv=None)
Retrieve an Identity Toolkit project configuration.
-
- getPasskeyConfig(name, x__xgafv=None)
Retrieve a passkey configuration for an Identity Toolkit project.
updateConfig(name, body=None, updateMask=None, x__xgafv=None)
Update an Identity Toolkit project configuration.
-
- updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
Update a passkey configuration for an Identity Toolkit project.
close()
@@ -340,29 +334,6 @@ getPasskeyConfig(name, x__xgafv=None)
- Retrieve a passkey configuration for an Identity Toolkit project. - -Args: - name: string, Required. The resource name of the config, for example: 'projects/my-awesome-project/passkeyConfig'. (required) - x__xgafv: string, V1 error format. - Allowed values - 1 - v1 error format - 2 - v2 error format - -Returns: - An object of the form: - - { # Configuration for signing in users using passkeys. - "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys. - "A String", - ], - "name": "A String", # Required. The name of the PasskeyConfig resource. - "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created. -}-
updateConfig(name, body=None, updateMask=None, x__xgafv=None)
Update an Identity Toolkit project configuration. @@ -792,39 +763,4 @@Method Details
}
updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
- Update a passkey configuration for an Identity Toolkit project. - -Args: - name: string, Required. The name of the PasskeyConfig resource. (required) - body: object, The request body. - The object takes the form of: - -{ # Configuration for signing in users using passkeys. - "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys. - "A String", - ], - "name": "A String", # Required. The name of the PasskeyConfig resource. - "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created. -} - - updateMask: string, Optional. The update mask applies to the resource. Empty update mask will result in updating nothing. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask - x__xgafv: string, V1 error format. - Allowed values - 1 - v1 error format - 2 - v2 error format - -Returns: - An object of the form: - - { # Configuration for signing in users using passkeys. - "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys. - "A String", - ], - "name": "A String", # Required. The name of the PasskeyConfig resource. - "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created. -}-
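With getPasskeyConfig and updatePasskeyConfig removed from this surface, the project-level getConfig and updateConfig accessors shown above are what the regenerated client still exposes. A minimal sketch of the remaining read path (the v2 admin surface and the 'projects/{project}/config' resource name are assumptions here, and the project id is a placeholder):

from googleapiclient.discovery import build

# Identity Toolkit admin client (v2 surface assumed).
idtoolkit = build("identitytoolkit", "v2")

# Passkey settings are no longer reachable through generated
# getPasskeyConfig/updatePasskeyConfig helpers in this revision;
# the project configuration itself is still retrievable.
config = idtoolkit.projects().getConfig(
    name="projects/my-awesome-project/config"  # placeholder, assumed name format
).execute()
print(config.get("name"))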