From e40139ded573066644065b6676e64db8eddc684c Mon Sep 17 00:00:00 2001
From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com>
Date: Tue, 31 Oct 2023 07:24:14 -0700
Subject: [PATCH] chore: Update discovery artifacts (#2273)

## Deleted keys were detected in the following stable discovery artifacts:
compute v1 https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
contentwarehouse v1 https://togithub.com/googleapis/google-api-python-client/commit/84de9162568c3c9ac9a1589534a107512d88e14b
identitytoolkit v2 https://togithub.com/googleapis/google-api-python-client/commit/3380caef861f8e81556738d2e3bed73a04c29d16
mybusinesslodging v1 https://togithub.com/googleapis/google-api-python-client/commit/0f6e412d1a75d101acb5eb03e5d2f36384f11376
places v1 https://togithub.com/googleapis/google-api-python-client/commit/fb2cf0d1f3198a33f39e515472dbb2f4ae4720d6
sqladmin v1 https://togithub.com/googleapis/google-api-python-client/commit/c487e30ff7ee30d86201e0d4bc0076b553c69863
testing v1 https://togithub.com/googleapis/google-api-python-client/commit/35d4629dbe736ea689dfd8d04ae9425b9862b4ec

## Deleted keys were detected in the following pre-stable discovery artifacts:
compute alpha https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
compute beta https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
sqladmin v1beta4 https://togithub.com/googleapis/google-api-python-client/commit/c487e30ff7ee30d86201e0d4bc0076b553c69863

## Discovery Artifact Change Summary:
feat(aiplatform): update the api https://togithub.com/googleapis/google-api-python-client/commit/7bdb02a3ab59e7e1f15df898fdd0b0e0596a1f92
feat(analyticsadmin): update the api https://togithub.com/googleapis/google-api-python-client/commit/5779a441640f840f866736514a66718a5ed07e80
feat(androiddeviceprovisioning): update the api https://togithub.com/googleapis/google-api-python-client/commit/160c287e0dd64e2576f787b66fd6b9d7cf7e155e
feat(appengine): update the api https://togithub.com/googleapis/google-api-python-client/commit/d6c3c723660b03319bae6ab0573b83d9f340cc07
feat(assuredworkloads): update the api https://togithub.com/googleapis/google-api-python-client/commit/2d0f2c5b561c1844be7f382155ec232a214c1f5c
feat(batch): update the api https://togithub.com/googleapis/google-api-python-client/commit/38e594da9323540fa904a25d385c76b4fc6edb2c
feat(cloudbilling): update the api https://togithub.com/googleapis/google-api-python-client/commit/d536b311d73aeac6ae0af89f3bcc22a204ce53f9
feat(compute): update the api https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
feat(connectors): update the api https://togithub.com/googleapis/google-api-python-client/commit/e6171f5380b7a0eb1fbf3a7b910f2c9fef0112b7
feat(contentwarehouse): update the api https://togithub.com/googleapis/google-api-python-client/commit/84de9162568c3c9ac9a1589534a107512d88e14b
feat(dataflow): update the api https://togithub.com/googleapis/google-api-python-client/commit/af59b3870e78f4c6841623b8aa68a850902b8f86
feat(dataproc): update the api https://togithub.com/googleapis/google-api-python-client/commit/01acfe0787e47037aca16f96dbefc164d449f8ca
feat(gmail): update the api https://togithub.com/googleapis/google-api-python-client/commit/4dc6c3595923f51dbcad4e1e3aee1b6cd183b814
feat(identitytoolkit): update the api https://togithub.com/googleapis/google-api-python-client/commit/3380caef861f8e81556738d2e3bed73a04c29d16
feat(metastore): update the api https://togithub.com/googleapis/google-api-python-client/commit/270ef650a7a7e678cd37faaf90ff3a5e4c23252a
feat(mybusinesslodging): update the api https://togithub.com/googleapis/google-api-python-client/commit/0f6e412d1a75d101acb5eb03e5d2f36384f11376
feat(notebooks): update the api https://togithub.com/googleapis/google-api-python-client/commit/419948bd02ef47dec5456f9d1ce341eb19d0f738
feat(places): update the api https://togithub.com/googleapis/google-api-python-client/commit/fb2cf0d1f3198a33f39e515472dbb2f4ae4720d6
feat(recaptchaenterprise): update the api https://togithub.com/googleapis/google-api-python-client/commit/2985fd0f9c76efa304e9e16db3c362ebb654f11a
feat(run): update the api https://togithub.com/googleapis/google-api-python-client/commit/954b59d3bc80fa007ac65a69f9337456fc8da06f
feat(servicenetworking): update the api https://togithub.com/googleapis/google-api-python-client/commit/8eda3cbe99c0fc35a66edd52e40cda7b30f35bc9
feat(spanner): update the api https://togithub.com/googleapis/google-api-python-client/commit/d046f6e7f0b7be466423e06f9339c6edf911b97e
feat(sqladmin): update the api https://togithub.com/googleapis/google-api-python-client/commit/c487e30ff7ee30d86201e0d4bc0076b553c69863
feat(testing): update the api https://togithub.com/googleapis/google-api-python-client/commit/35d4629dbe736ea689dfd8d04ae9425b9862b4ec
feat(texttospeech): update the api https://togithub.com/googleapis/google-api-python-client/commit/44ffbb0a7bbbfc6452e57a05c213523774578364
feat(vmmigration): update the api https://togithub.com/googleapis/google-api-python-client/commit/f9cd5f95977effb262b17ce5d5b4c4895872954c
feat(workflowexecutions): update the api https://togithub.com/googleapis/google-api-python-client/commit/c24e2608c3996d57b5e062fcb33c58f83f8838d7
---
 ...sapproval_v1.folders.approvalRequests.html | 15 +- ...val_v1.organizations.approvalRequests.html | 15 +- ...approval_v1.projects.approvalRequests.html | 15 +- ...form_v1.projects.locations.customJobs.html | 8 +- ...ts.locations.hyperparameterTuningJobs.html | 68 +- ...latform_v1.projects.locations.nasJobs.html | 16 +- ...latform_v1.projects.locations.studies.html | 85 +- ...v1beta1.projects.locations.customJobs.html | 8 +- ...ts.locations.hyperparameterTuningJobs.html | 68 +- ...rm_v1beta1.projects.locations.nasJobs.html | 16 +- ...rm_v1beta1.projects.locations.studies.html | 85 +- docs/dyn/analyticsadmin_v1alpha.accounts.html | 8 + ...n_v1alpha.properties.conversionEvents.html | 24 + docs/dyn/analyticsadmin_v1beta.accounts.html | 8 + ...in_v1beta.properties.conversionEvents.html | 24 + ...eviceprovisioning_v1.partners.devices.html | 6 +- ...roidmanagement_v1.enterprises.devices.html | 2 +- .../appengine_v1.apps.services.versions.html | 12 + ..._v1.organizations.locations.workloads.html | 24 +- ...ta1.organizations.locations.workloads.html | 20 +- .../batch_v1.projects.locations.state.html | 1 + docs/dyn/biglake_v1.html | 111 + docs/dyn/biglake_v1.projects.html | 91 + ...projects.locations.catalogs.databases.html | 318 ++ ...s.locations.catalogs.databases.tables.html | 430 +++ ...iglake_v1.projects.locations.catalogs.html | 231 ++ docs/dyn/biglake_v1.projects.locations.html | 91 + docs/dyn/cloudbilling_v1.billingAccounts.html | 47 +- ...illing_v1.billingAccounts.subAccounts.html | 171 ++ docs/dyn/cloudbilling_v1.html | 5 + ...ling_v1.organizations.billingAccounts.html | 197 ++
docs/dyn/cloudbilling_v1.organizations.html | 91 + .../dyn/cloudtasks_v2.projects.locations.html | 6 +- ...oudtasks_v2.projects.locations.queues.html | 4 +- ...cloudtasks_v2beta2.projects.locations.html | 6 +- ...beta2.projects.locations.queues.tasks.html | 4 +- ...cloudtasks_v2beta3.projects.locations.html | 6 +- ...sks_v2beta3.projects.locations.queues.html | 4 +- ...beta3.projects.locations.queues.tasks.html | 4 +- docs/dyn/compute_alpha.backendServices.html | 14 +- ...ute_alpha.globalNetworkEndpointGroups.html | 3 - docs/dyn/compute_alpha.instances.html | 145 + .../compute_alpha.networkEndpointGroups.html | 4 - .../compute_alpha.regionBackendServices.html | 12 +- docs/dyn/compute_alpha.regionCommitments.html | 15 + ...ute_alpha.regionNetworkEndpointGroups.html | 3 - ...ations.connections.eventSubscriptions.html | 16 + ...cations.providers.connectors.versions.html | 2 + docs/dyn/content_v2_1.reports.html | 6 +- docs/dyn/dataflow_v1b3.projects.jobs.html | 7 + ...v1b3.projects.locations.flexTemplates.html | 1 + ...dataflow_v1b3.projects.locations.jobs.html | 6 + ...low_v1b3.projects.locations.templates.html | 2 + .../dyn/dataflow_v1b3.projects.templates.html | 2 + ...aplex_v1.projects.locations.dataScans.html | 10 +- ..._v1.projects.locations.dataScans.jobs.html | 4 +- ...ataproc_v1.projects.locations.batches.html | 12 + ...taproc_v1.projects.locations.sessions.html | 12 + ....projects.locations.workflowTemplates.html | 24 + ...v1.projects.regions.workflowTemplates.html | 24 + ..._v2.organizations.deidentifyTemplates.html | 8 +- ...dlp_v2.organizations.inspectTemplates.html | 8 +- ...zations.locations.deidentifyTemplates.html | 8 +- ...anizations.locations.discoveryConfigs.html | 150 +- ...lp_v2.organizations.locations.dlpJobs.html | 2 +- ...anizations.locations.inspectTemplates.html | 8 +- ...2.organizations.locations.jobTriggers.html | 8 +- ...ganizations.locations.storedInfoTypes.html | 8 +- .../dlp_v2.organizations.storedInfoTypes.html | 8 +- .../dlp_v2.projects.deidentifyTemplates.html | 8 +- docs/dyn/dlp_v2.projects.dlpJobs.html | 2 +- .../dyn/dlp_v2.projects.inspectTemplates.html | 8 +- docs/dyn/dlp_v2.projects.jobTriggers.html | 8 +- ...rojects.locations.deidentifyTemplates.html | 8 +- ...2.projects.locations.discoveryConfigs.html | 150 +- .../dlp_v2.projects.locations.dlpJobs.html | 2 +- ...2.projects.locations.inspectTemplates.html | 8 +- ...dlp_v2.projects.locations.jobTriggers.html | 8 +- ...v2.projects.locations.storedInfoTypes.html | 8 +- docs/dyn/dlp_v2.projects.storedInfoTypes.html | 8 +- .../gmail_v1.users.settings.cse.keypairs.html | 18 + docs/dyn/identitytoolkit_v1.accounts.html | 4 +- .../identitytoolkit_v1.projects.accounts.html | 2 +- ...ytoolkit_v1.projects.tenants.accounts.html | 2 +- docs/dyn/identitytoolkit_v2.projects.html | 64 - .../identitytoolkit_v2.projects.tenants.html | 64 - docs/dyn/index.md | 4 + ...1.projects.locations.services.backups.html | 15 + ...astore_v1.projects.locations.services.html | 20 + ...a.projects.locations.services.backups.html | 18 +- ...e_v1alpha.projects.locations.services.html | 24 +- ...a.projects.locations.services.backups.html | 18 +- ...re_v1beta.projects.locations.services.html | 24 +- .../monitoring_v3.projects.alertPolicies.html | 12 +- .../monitoring_v3.projects.timeSeries.html | 4 +- docs/dyn/mybusinesslodging_v1.locations.html | 30 +- ...ybusinesslodging_v1.locations.lodging.html | 10 +- ....projects.locations.lbRouteExtensions.html | 16 +- ...rojects.locations.lbTrafficExtensions.html | 8 +- 
...books_v1.projects.locations.instances.html | 6 +- ...books_v2.projects.locations.instances.html | 38 + docs/dyn/places_v1.places.html | 891 +++++- docs/dyn/places_v1.places.photos.html | 112 + docs/dyn/pubsub_v1.projects.schemas.html | 2 +- docs/dyn/pubsub_v1.projects.snapshots.html | 6 +- .../dyn/pubsub_v1.projects.subscriptions.html | 4 +- docs/dyn/pubsub_v1.projects.topics.html | 16 +- ...chaenterprise_v1.projects.assessments.html | 33 +- .../dyn/run_v1.namespaces.configurations.html | 8 +- .../dyn/run_v1.namespaces.domainmappings.html | 8 +- docs/dyn/run_v1.namespaces.executions.html | 6 +- docs/dyn/run_v1.namespaces.jobs.html | 26 +- docs/dyn/run_v1.namespaces.revisions.html | 4 +- docs/dyn/run_v1.namespaces.routes.html | 4 +- docs/dyn/run_v1.namespaces.services.html | 24 +- docs/dyn/run_v1.namespaces.tasks.html | 4 +- ..._v1.projects.locations.configurations.html | 8 +- ..._v1.projects.locations.domainmappings.html | 8 +- .../run_v1.projects.locations.revisions.html | 4 +- .../dyn/run_v1.projects.locations.routes.html | 4 +- .../run_v1.projects.locations.services.html | 24 +- .../run_v2.projects.locations.services.html | 12 + docs/dyn/servicenetworking_v1.services.html | 1 + ...v1.services.projects.global_.networks.html | 22 + docs/dyn/spanner_v1.projects.instances.html | 48 + docs/dyn/sqladmin_v1.backupRuns.html | 8 +- docs/dyn/sqladmin_v1.databases.html | 16 +- docs/dyn/sqladmin_v1.instances.html | 109 +- docs/dyn/sqladmin_v1.operations.html | 8 +- docs/dyn/sqladmin_v1.projects.instances.html | 16 +- docs/dyn/sqladmin_v1.sslCerts.html | 8 +- docs/dyn/sqladmin_v1.users.html | 12 +- docs/dyn/sqladmin_v1beta4.backupRuns.html | 8 +- docs/dyn/sqladmin_v1beta4.databases.html | 16 +- docs/dyn/sqladmin_v1beta4.instances.html | 109 +- docs/dyn/sqladmin_v1beta4.operations.html | 8 +- .../sqladmin_v1beta4.projects.instances.html | 16 +- docs/dyn/sqladmin_v1beta4.sslCerts.html | 8 +- docs/dyn/sqladmin_v1beta4.users.html | 12 +- .../testing_v1.projects.deviceSessions.html | 72 +- .../texttospeech_v1.projects.locations.html | 2 +- docs/dyn/texttospeech_v1.text.html | 2 +- ...xttospeech_v1beta1.projects.locations.html | 2 +- docs/dyn/texttospeech_v1beta1.text.html | 2 +- ...gration_v1.projects.locations.sources.html | 12 + ...ations.sources.migratingVms.cloneJobs.html | 9 + ...ions.sources.migratingVms.cutoverJobs.html | 9 + ...ojects.locations.sources.migratingVms.html | 72 + ...n_v1alpha1.projects.locations.sources.html | 12 + ...ations.sources.migratingVms.cloneJobs.html | 9 + ...ions.sources.migratingVms.cutoverJobs.html | 9 + ...ojects.locations.sources.migratingVms.html | 72 + ...ojects.locations.workflows.executions.html | 5 + ...ions.workflows.executions.stepEntries.html | 204 ++ ...orkstationClusters.workstationConfigs.html | 20 +- ...orkstationClusters.workstationConfigs.html | 20 +- .../acceleratedmobilepageurl.v1.json | 2 +- .../accesscontextmanager.v1beta.json | 2 +- .../discovery_cache/documents/acmedns.v1.json | 2 +- .../documents/adexchangebuyer2.v2beta1.json | 2 +- .../documents/admin.datatransfer_v1.json | 2 +- .../documents/admin.directory_v1.json | 2 +- .../documents/admin.reports_v1.json | 2 +- .../discovery_cache/documents/admob.v1.json | 2 +- .../documents/admob.v1beta.json | 2 +- .../discovery_cache/documents/adsense.v2.json | 2 +- .../documents/advisorynotifications.v1.json | 2 +- .../documents/aiplatform.v1.json | 66 +- .../documents/aiplatform.v1beta1.json | 66 +- .../documents/alertcenter.v1beta1.json | 2 +- .../documents/analyticsadmin.v1alpha.json | 22 +- 
.../documents/analyticsadmin.v1beta.json | 22 +- .../documents/analyticsdata.v1beta.json | 2 +- .../documents/analyticshub.v1.json | 2 +- .../documents/analyticshub.v1beta1.json | 2 +- .../androiddeviceprovisioning.v1.json | 16 +- .../documents/androidenterprise.v1.json | 2 +- .../documents/androidmanagement.v1.json | 18 +- .../documents/androidpublisher.v3.json | 15 +- .../documents/apigateway.v1.json | 2 +- .../documents/apigateway.v1beta.json | 2 +- .../discovery_cache/documents/apikeys.v2.json | 2 +- .../documents/appengine.v1.json | 10 +- .../documents/appengine.v1alpha.json | 2 +- .../documents/appengine.v1beta.json | 2 +- .../documents/area120tables.v1alpha1.json | 2 +- .../documents/artifactregistry.v1.json | 2 +- .../documents/artifactregistry.v1beta1.json | 2 +- .../documents/artifactregistry.v1beta2.json | 2 +- .../documents/assuredworkloads.v1.json | 7 +- .../documents/assuredworkloads.v1beta1.json | 7 +- .../authorizedbuyersmarketplace.v1.json | 2 +- .../documents/backupdr.v1.json | 2 +- .../documents/baremetalsolution.v2.json | 2 +- .../discovery_cache/documents/batch.v1.json | 6 +- .../discovery_cache/documents/biglake.v1.json | 910 ++++++ .../documents/bigquery.v2.json | 2 +- .../documents/bigqueryconnection.v1beta1.json | 2 +- .../documents/bigquerydatatransfer.v1.json | 2 +- .../documents/bigqueryreservation.v1.json | 2 +- .../documents/bigtableadmin.v2.json | 2 +- .../documents/billingbudgets.v1.json | 2 +- .../documents/billingbudgets.v1beta1.json | 2 +- .../documents/binaryauthorization.v1.json | 2 +- .../binaryauthorization.v1beta1.json | 2 +- .../documents/blockchainnodeengine.v1.json | 2 +- .../discovery_cache/documents/blogger.v2.json | 2 +- .../discovery_cache/documents/blogger.v3.json | 2 +- .../discovery_cache/documents/books.v1.json | 2 +- .../businessprofileperformance.v1.json | 2 +- .../documents/calendar.v3.json | 2 +- .../discovery_cache/documents/chat.v1.json | 2 +- .../documents/checks.v1alpha.json | 2 +- .../documents/chromemanagement.v1.json | 2 +- .../documents/chromepolicy.v1.json | 2 +- .../documents/chromeuxreport.v1.json | 2 +- .../documents/classroom.v1.json | 2 +- .../documents/cloudasset.v1.json | 2 +- .../documents/cloudasset.v1beta1.json | 2 +- .../documents/cloudasset.v1p1beta1.json | 2 +- .../documents/cloudasset.v1p5beta1.json | 2 +- .../documents/cloudasset.v1p7beta1.json | 2 +- .../documents/cloudbilling.v1.json | 245 +- .../documents/cloudbilling.v1beta.json | 2 +- .../documents/cloudbuild.v1.json | 2 +- .../documents/cloudbuild.v2.json | 2 +- .../documents/cloudchannel.v1.json | 2 +- .../documents/clouddeploy.v1.json | 2 +- .../clouderrorreporting.v1beta1.json | 2 +- .../documents/cloudfunctions.v1.json | 2 +- .../documents/cloudfunctions.v2.json | 2 +- .../documents/cloudfunctions.v2alpha.json | 2 +- .../documents/cloudfunctions.v2beta.json | 2 +- .../documents/cloudidentity.v1.json | 2 +- .../documents/cloudidentity.v1beta1.json | 2 +- .../documents/cloudkms.v1.json | 2 +- .../documents/cloudprofiler.v2.json | 2 +- .../documents/cloudresourcemanager.v1.json | 2 +- .../cloudresourcemanager.v1beta1.json | 2 +- .../documents/cloudresourcemanager.v2.json | 2 +- .../cloudresourcemanager.v2beta1.json | 2 +- .../documents/cloudresourcemanager.v3.json | 2 +- .../documents/cloudscheduler.v1.json | 2 +- .../documents/cloudscheduler.v1beta1.json | 2 +- .../documents/cloudsearch.v1.json | 2 +- .../documents/cloudshell.v1.json | 2 +- .../documents/cloudsupport.v2.json | 2 +- .../documents/cloudsupport.v2beta.json | 2 +- 
.../documents/cloudtasks.v2.json | 6 +- .../documents/cloudtasks.v2beta2.json | 6 +- .../documents/cloudtasks.v2beta3.json | 8 +- .../documents/compute.alpha.json | 120 +- .../documents/compute.beta.json | 7 +- .../discovery_cache/documents/compute.v1.json | 7 +- .../documents/connectors.v1.json | 59 +- .../documents/connectors.v2.json | 2 +- .../documents/containeranalysis.v1.json | 2 +- .../documents/containeranalysis.v1alpha1.json | 2 +- .../documents/containeranalysis.v1beta1.json | 2 +- .../documents/content.v2.1.json | 8 +- .../documents/contentwarehouse.v1.json | 2696 ++++------------- .../documents/customsearch.v1.json | 2 +- .../documents/datacatalog.v1.json | 2 +- .../documents/datacatalog.v1beta1.json | 2 +- .../documents/dataflow.v1b3.json | 7 +- .../documents/datalineage.v1.json | 2 +- .../documents/datamigration.v1.json | 2 +- .../documents/datamigration.v1beta1.json | 2 +- .../documents/datapipelines.v1.json | 2 +- .../documents/dataplex.v1.json | 4 +- .../documents/dataproc.v1.json | 49 +- .../documents/datastore.v1.json | 2 +- .../documents/datastore.v1beta1.json | 2 +- .../documents/datastore.v1beta3.json | 2 +- .../documents/dialogflow.v2.json | 2 +- .../documents/dialogflow.v2beta1.json | 2 +- .../documents/dialogflow.v3.json | 2 +- .../documents/dialogflow.v3beta1.json | 2 +- .../documents/digitalassetlinks.v1.json | 2 +- .../documents/discoveryengine.v1alpha.json | 2 +- .../documents/discoveryengine.v1beta.json | 2 +- .../documents/displayvideo.v1.json | 23 +- .../documents/displayvideo.v2.json | 23 +- .../documents/displayvideo.v3.json | 27 +- .../discovery_cache/documents/dlp.v2.json | 178 +- .../discovery_cache/documents/dns.v1.json | 2 +- .../documents/dns.v1beta2.json | 2 +- .../discovery_cache/documents/docs.v1.json | 2 +- .../documents/documentai.v1.json | 2 +- .../documents/documentai.v1beta2.json | 2 +- .../documents/documentai.v1beta3.json | 2 +- .../discovery_cache/documents/domains.v1.json | 8 +- .../documents/domains.v1alpha2.json | 8 +- .../documents/domains.v1beta1.json | 8 +- .../documents/domainsrdap.v1.json | 2 +- .../documents/doubleclickbidmanager.v2.json | 2 +- .../documents/doubleclicksearch.v2.json | 2 +- .../discovery_cache/documents/drive.v2.json | 2 +- .../discovery_cache/documents/drive.v3.json | 2 +- .../documents/driveactivity.v2.json | 2 +- .../documents/drivelabels.v2.json | 2 +- .../documents/drivelabels.v2beta.json | 2 +- .../documents/essentialcontacts.v1.json | 2 +- .../documents/eventarc.v1.json | 2 +- .../documents/eventarc.v1beta1.json | 2 +- .../documents/factchecktools.v1alpha1.json | 2 +- .../discovery_cache/documents/fcm.v1.json | 2 +- .../documents/fcmdata.v1beta1.json | 2 +- .../discovery_cache/documents/file.v1.json | 14 +- .../documents/file.v1beta1.json | 14 +- .../documents/firebaseappdistribution.v1.json | 2 +- .../documents/firebasedatabase.v1beta.json | 2 +- .../documents/firebasehosting.v1.json | 2 +- .../documents/firebasehosting.v1beta1.json | 2 +- .../documents/firebaseml.v1.json | 2 +- .../documents/firebaseml.v1beta2.json | 2 +- .../documents/firebasestorage.v1beta.json | 2 +- .../documents/firestore.v1.json | 2 +- .../documents/firestore.v1beta1.json | 2 +- .../documents/firestore.v1beta2.json | 2 +- .../discovery_cache/documents/fitness.v1.json | 2 +- .../discovery_cache/documents/forms.v1.json | 2 +- .../discovery_cache/documents/games.v1.json | 2 +- .../gamesConfiguration.v1configuration.json | 2 +- .../gamesManagement.v1management.json | 2 +- .../documents/gkebackup.v1.json | 2 +- 
.../discovery_cache/documents/gkehub.v1.json | 2 +- .../documents/gkehub.v1alpha.json | 2 +- .../documents/gkehub.v1alpha2.json | 2 +- .../documents/gkehub.v1beta.json | 2 +- .../documents/gkehub.v1beta1.json | 2 +- .../documents/gkehub.v2alpha.json | 2 +- .../documents/gkeonprem.v1.json | 2 +- .../discovery_cache/documents/gmail.v1.json | 17 +- .../documents/gmailpostmastertools.v1.json | 2 +- .../gmailpostmastertools.v1beta1.json | 2 +- .../documents/groupsmigration.v1.json | 2 +- .../documents/healthcare.v1.json | 2 +- .../documents/healthcare.v1beta1.json | 2 +- .../documents/homegraph.v1.json | 2 +- .../documents/iamcredentials.v1.json | 2 +- .../documents/iap.v1beta1.json | 2 +- .../documents/identitytoolkit.v1.json | 6 +- .../documents/identitytoolkit.v2.json | 146 +- .../documents/indexing.v3.json | 2 +- .../discovery_cache/documents/jobs.v3.json | 2 +- .../discovery_cache/documents/jobs.v4.json | 2 +- .../discovery_cache/documents/keep.v1.json | 2 +- .../documents/kmsinventory.v1.json | 2 +- .../documents/language.v1.json | 2 +- .../documents/language.v1beta2.json | 2 +- .../documents/language.v2.json | 2 +- .../documents/libraryagent.v1.json | 2 +- .../documents/licensing.v1.json | 2 +- .../documents/lifesciences.v2beta.json | 2 +- .../documents/localservices.v1.json | 2 +- .../documents/memcache.v1.json | 2 +- .../documents/memcache.v1beta2.json | 2 +- .../documents/metastore.v1.json | 28 +- .../documents/metastore.v1alpha.json | 8 +- .../documents/metastore.v1beta.json | 8 +- .../documents/migrationcenter.v1.json | 2 +- .../documents/migrationcenter.v1alpha1.json | 2 +- .../documents/monitoring.v1.json | 2 +- .../documents/monitoring.v3.json | 6 +- .../mybusinessaccountmanagement.v1.json | 2 +- .../mybusinessbusinessinformation.v1.json | 2 +- .../documents/mybusinesslodging.v1.json | 19 +- .../documents/mybusinessnotifications.v1.json | 2 +- .../documents/mybusinessplaceactions.v1.json | 2 +- .../documents/mybusinessqanda.v1.json | 2 +- .../documents/mybusinessverifications.v1.json | 2 +- .../documents/networkmanagement.v1.json | 2 +- .../documents/networkmanagement.v1beta1.json | 2 +- .../documents/networksecurity.v1.json | 2 +- .../documents/networksecurity.v1beta1.json | 2 +- .../documents/networkservices.v1.json | 2 +- .../documents/networkservices.v1beta1.json | 8 +- .../documents/notebooks.v1.json | 4 +- .../documents/notebooks.v2.json | 104 +- .../documents/ondemandscanning.v1.json | 2 +- .../documents/ondemandscanning.v1beta1.json | 2 +- .../documents/orgpolicy.v2.json | 2 +- .../discovery_cache/documents/oslogin.v1.json | 2 +- .../documents/oslogin.v1alpha.json | 2 +- .../documents/oslogin.v1beta.json | 2 +- .../documents/pagespeedonline.v5.json | 2 +- .../paymentsresellersubscription.v1.json | 2 +- .../discovery_cache/documents/people.v1.json | 2 +- .../discovery_cache/documents/places.v1.json | 636 +++- .../documents/playcustomapp.v1.json | 2 +- .../playdeveloperreporting.v1alpha1.json | 2 +- .../playdeveloperreporting.v1beta1.json | 2 +- .../documents/playgrouping.v1alpha1.json | 2 +- .../documents/playintegrity.v1.json | 2 +- .../documents/policyanalyzer.v1.json | 2 +- .../documents/policyanalyzer.v1beta1.json | 2 +- .../documents/policysimulator.v1.json | 2 +- .../documents/policysimulator.v1alpha.json | 2 +- .../documents/policysimulator.v1beta.json | 2 +- .../documents/policytroubleshooter.v1.json | 2 +- .../policytroubleshooter.v1beta.json | 2 +- .../documents/privateca.v1.json | 2 +- .../documents/privateca.v1beta1.json | 2 +- 
.../documents/prod_tt_sasportal.v1alpha1.json | 2 +- .../documents/publicca.v1.json | 2 +- .../documents/publicca.v1alpha1.json | 2 +- .../documents/publicca.v1beta1.json | 2 +- .../discovery_cache/documents/pubsub.v1.json | 14 +- .../documents/pubsub.v1beta1a.json | 2 +- .../documents/pubsub.v1beta2.json | 2 +- .../documents/pubsublite.v1.json | 2 +- .../rapidmigrationassessment.v1.json | 2 +- .../readerrevenuesubscriptionlinking.v1.json | 2 +- .../documents/realtimebidding.v1.json | 2 +- .../documents/recaptchaenterprise.v1.json | 59 +- .../recommendationengine.v1beta1.json | 2 +- .../documents/recommender.v1.json | 2 +- .../documents/recommender.v1beta1.json | 2 +- .../discovery_cache/documents/redis.v1.json | 2 +- .../documents/redis.v1beta1.json | 2 +- .../documents/reseller.v1.json | 2 +- .../documents/resourcesettings.v1.json | 2 +- .../discovery_cache/documents/retail.v2.json | 2 +- .../documents/retail.v2alpha.json | 2 +- .../documents/retail.v2beta.json | 2 +- .../discovery_cache/documents/run.v1.json | 4 +- .../discovery_cache/documents/run.v2.json | 18 +- .../documents/runtimeconfig.v1.json | 2 +- .../documents/safebrowsing.v4.json | 2 +- .../documents/safebrowsing.v5.json | 2 +- .../documents/searchconsole.v1.json | 2 +- .../documents/secretmanager.v1.json | 2 +- .../documents/secretmanager.v1beta1.json | 2 +- .../documents/securitycenter.v1.json | 2 +- .../documents/securitycenter.v1beta1.json | 2 +- .../documents/securitycenter.v1beta2.json | 2 +- .../serviceconsumermanagement.v1.json | 2 +- .../serviceconsumermanagement.v1beta1.json | 2 +- .../documents/servicecontrol.v1.json | 2 +- .../documents/servicecontrol.v2.json | 2 +- .../documents/servicedirectory.v1.json | 2 +- .../documents/servicedirectory.v1beta1.json | 2 +- .../documents/servicemanagement.v1.json | 2 +- .../documents/servicenetworking.v1.json | 44 +- .../documents/servicenetworking.v1beta.json | 14 +- .../documents/serviceusage.v1.json | 2 +- .../documents/serviceusage.v1beta1.json | 2 +- .../discovery_cache/documents/slides.v1.json | 2 +- .../discovery_cache/documents/spanner.v1.json | 63 + .../discovery_cache/documents/speech.v1.json | 2 +- .../documents/speech.v1p1beta1.json | 2 +- .../documents/sqladmin.v1.json | 32 +- .../documents/sqladmin.v1beta4.json | 32 +- .../discovery_cache/documents/storage.v1.json | 4 +- .../documents/storagetransfer.v1.json | 2 +- .../documents/streetviewpublish.v1.json | 2 +- .../discovery_cache/documents/sts.v1.json | 2 +- .../discovery_cache/documents/sts.v1beta.json | 2 +- .../documents/tagmanager.v1.json | 2 +- .../documents/tagmanager.v2.json | 2 +- .../discovery_cache/documents/tasks.v1.json | 2 +- .../discovery_cache/documents/testing.v1.json | 9 +- .../documents/texttospeech.v1.json | 5 +- .../documents/texttospeech.v1beta1.json | 5 +- .../documents/toolresults.v1beta3.json | 2 +- .../discovery_cache/documents/tpu.v1.json | 2 +- .../documents/tpu.v1alpha1.json | 2 +- .../discovery_cache/documents/tpu.v2.json | 2 +- .../documents/tpu.v2alpha1.json | 2 +- .../documents/trafficdirector.v2.json | 2 +- .../documents/transcoder.v1.json | 2 +- .../documents/travelimpactmodel.v1.json | 2 +- .../documents/verifiedaccess.v1.json | 2 +- .../documents/verifiedaccess.v2.json | 2 +- .../documents/versionhistory.v1.json | 2 +- .../discovery_cache/documents/vision.v1.json | 2 +- .../documents/vision.v1p1beta1.json | 2 +- .../documents/vision.v1p2beta1.json | 2 +- .../documents/vmmigration.v1.json | 43 +- .../documents/vmmigration.v1alpha1.json | 43 +- 
 .../documents/vpcaccess.v1beta1.json | 2 +- .../discovery_cache/documents/webrisk.v1.json | 2 +- .../documents/websecurityscanner.v1.json | 8 +- .../documents/websecurityscanner.v1alpha.json | 22 +- .../documents/websecurityscanner.v1beta.json | 8 +- .../documents/workflowexecutions.v1.json | 304 +- .../documents/workflowexecutions.v1beta.json | 2 +- .../documents/workflows.v1.json | 2 +- .../documents/workflows.v1beta.json | 2 +- .../documents/workloadmanager.v1.json | 2 +- .../documents/workstations.v1.json | 6 +- .../documents/workstations.v1beta.json | 6 +- .../discovery_cache/documents/youtube.v3.json | 2 +- .../documents/youtubeAnalytics.v2.json | 2 +- .../documents/youtubereporting.v1.json | 2 +-
 496 files changed, 8741 insertions(+), 3890 deletions(-)
 create mode 100644 docs/dyn/biglake_v1.html
 create mode 100644 docs/dyn/biglake_v1.projects.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.catalogs.databases.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.catalogs.databases.tables.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.catalogs.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.html
 create mode 100644 docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html
 create mode 100644 docs/dyn/cloudbilling_v1.organizations.billingAccounts.html
 create mode 100644 docs/dyn/cloudbilling_v1.organizations.html
 create mode 100644 docs/dyn/places_v1.places.photos.html
 create mode 100644 docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.stepEntries.html
 create mode 100644 googleapiclient/discovery_cache/documents/biglake.v1.json

diff --git a/docs/dyn/accessapproval_v1.folders.approvalRequests.html b/docs/dyn/accessapproval_v1.folders.approvalRequests.html
index 8ec490186b6..27e2b2380db 100644
--- a/docs/dyn/accessapproval_v1.folders.approvalRequests.html
+++ b/docs/dyn/accessapproval_v1.folders.approvalRequests.html
@@ -135,8 +135,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -195,8 +194,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -244,8 +242,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -299,8 +296,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -353,8 +349,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
diff --git a/docs/dyn/accessapproval_v1.organizations.approvalRequests.html b/docs/dyn/accessapproval_v1.organizations.approvalRequests.html
index e12ae3578f5..aa68cc96f05 100644
--- a/docs/dyn/accessapproval_v1.organizations.approvalRequests.html
+++ b/docs/dyn/accessapproval_v1.organizations.approvalRequests.html
@@ -135,8 +135,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -195,8 +194,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -244,8 +242,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -299,8 +296,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -353,8 +349,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
diff --git a/docs/dyn/accessapproval_v1.projects.approvalRequests.html b/docs/dyn/accessapproval_v1.projects.approvalRequests.html
index 9a5aaceb594..7cec004a579 100644
--- a/docs/dyn/accessapproval_v1.projects.approvalRequests.html
+++ b/docs/dyn/accessapproval_v1.projects.approvalRequests.html
@@ -135,8 +135,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -195,8 +194,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -244,8 +242,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -299,8 +296,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
@@ -353,8 +349,7 @@ Method Details
     },
     "name": "A String", # The resource name of the request. Format is "{projects|folders|organizations}/{id}/approvalRequests/{approval_request}".
     "requestTime": "A String", # The time at which approval was requested.
-    "requestedDuration": "A String", # The requested access duration.
-    "requestedExpiration": "A String", # The original requested expiration for the approval. Calculated by adding the requested_duration to the request_time.
+    "requestedExpiration": "A String", # The requested expiration for the approval. If the request is approved, access will be granted from the time of approval until the expiration time.
     "requestedLocations": { # Home office and physical location of the principal. # The locations for which approval is being requested.
       "principalOfficeCountry": "A String", # The "home office" location of the principal. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
       "principalPhysicalLocationCountry": "A String", # Physical location of the principal at the time of the access. A two-letter country code (ISO 3166-1 alpha-2), such as "US", "DE" or "GB" or a region code. In some limited situations Google systems may refer refer to a region code instead of a country code. Possible Region Codes: * ASI: Asia * EUR: Europe * OCE: Oceania * AFR: Africa * NAM: North America * SAM: South America * ANT: Antarctica * ANY: Any location
diff --git a/docs/dyn/aiplatform_v1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1.projects.locations.customJobs.html
index c1adc5da1a4..dc6d6de35ef 100644
--- a/docs/dyn/aiplatform_v1.projects.locations.customJobs.html
+++ b/docs/dyn/aiplatform_v1.projects.locations.customJobs.html
@@ -164,7 +164,7 @@ Method Details
     "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
     "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
     "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
-    "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+    "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
     "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
       "A String",
     ],
@@ -274,7 +274,7 @@ Method Details
     "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
     "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
     "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
-    "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+    "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
     "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
      "A String",
     ],
@@ -426,7 +426,7 @@ Method Details
     "experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}`
     "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}`
     "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network.
-    "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
+    "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations
     "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range'].
       "A String",
     ],
@@ -549,7 +549,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html index 8c742cdf1dc..c66ee00d780 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.hyperparameterTuningJobs.html @@ -240,6 +240,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, "trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. "baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` @@ -250,7 +265,7 @@
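To make the new `studyStoppingConfig` block above concrete, here is a minimal, assumption-laden sketch of a HyperparameterTuningJob request body that sets a few of the stopping conditions. Field names mirror this hunk; the parent, metric, parameter, budgets, and `trial_job_spec` are placeholders, not values from the patch.

    from googleapiclient.discovery import build

    def create_tuning_job(project: str, region: str, trial_job_spec: dict) -> dict:
        service = build(
            "aiplatform",
            "v1",
            client_options={"api_endpoint": f"{region}-aiplatform.googleapis.com"},
        )
        body = {
            "displayName": "example-hp-tuning-job",
            "maxTrialCount": 50,
            "parallelTrialCount": 2,
            "studySpec": {
                "metrics": [{"metricId": "accuracy", "goal": "MAXIMIZE"}],
                "parameters": [
                    {
                        "parameterId": "learning_rate",
                        "doubleValueSpec": {"minValue": 1e-4, "maxValue": 1e-1},
                        "scaleType": "UNIT_LOG_SCALE",
                    }
                ],
                # New in this revision: automated stopping conditions for the Study.
                "studyStoppingConfig": {
                    "minNumTrials": 5,             # never stop before 5 COMPLETED trials
                    "maxNumTrials": 50,            # hard trial budget
                    "maxNumTrialsNoProgress": 10,  # convergence check (single-objective only)
                    "maximumRuntimeConstraint": {"maxDuration": "7200s"},
                    "shouldStopAsap": True,        # interrupt running trials once a condition fires
                },
            },
            "trialJobSpec": trial_job_spec,
        }
        return (
            service.projects()
            .locations()
            .hyperparameterTuningJobs()
            .create(parent=f"projects/{project}/locations/{region}", body=body)
            .execute()
        )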

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -470,6 +485,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, "trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. "baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` @@ -480,7 +510,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -742,6 +772,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, "trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. "baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` @@ -752,7 +797,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -985,6 +1030,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, "trialJobSpec": { # Represents the spec of a CustomJob. # Required. The spec of a trial job. The same spec applies to the CustomJobs created in all the trials. "baseOutputDirectory": { # The Google Cloud Storage location where the output is to be written to. # The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For HyperparameterTuningJob, the baseOutputDirectory of each child CustomJob backing a Trial is set to a subdirectory of name id under its parent HyperparameterTuningJob's baseOutputDirectory. The following Vertex AI environment variables will be passed to containers or python modules when this field is set: For CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` @@ -995,7 +1055,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html index 83ded9ac50c..c66feb0e0f8 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.nasJobs.html @@ -220,7 +220,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -298,7 +298,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -462,7 +462,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -540,7 +540,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -746,7 +746,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -824,7 +824,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1001,7 +1001,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1079,7 +1079,7 @@

Method Details

"experiment": "A String", # Optional. The Experiment associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` "experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.studies.html b/docs/dyn/aiplatform_v1.projects.locations.studies.html index 0882f98a7e4..10bb6a2ebf0 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.studies.html +++ b/docs/dyn/aiplatform_v1.projects.locations.studies.html @@ -120,7 +120,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # A message representing a Study. +{ # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -202,6 +202,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, } @@ -213,7 +228,7 @@

Method Details

Returns: An object of the form: - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -295,6 +310,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, } @@ -331,7 +361,7 @@

Method Details

Returns: An object of the form: - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -413,6 +443,21 @@

Method Details

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, } @@ -436,7 +481,7 @@

{ # Response message for VizierService.ListStudies. "nextPageToken": "A String", # Passes this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages. "studies": [ # The studies associated with the project. - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -518,6 +563,21 @@

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, }, ], @@ -559,7 +619,7 @@

Returns: An object of the form: - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -641,6 +701,21 @@

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, }, } diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html index 9260d3ba929..e9d288d8626 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.customJobs.html @@ -165,7 +165,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -276,7 +276,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -429,7 +429,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -553,7 +553,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html index 8075f4874af..ee859e575d5 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.hyperparameterTuningJobs.html @@ -247,6 +247,21 @@

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -264,7 +279,7 @@
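Within a studySpec, the new stopping block sits next to `transferLearningConfig`. The fragment below shows how the two read together; the values are illustrative, and per the note above the transfer-learning flag is honored by the Vizier service rather than by HyperparameterTuningJob.

```python
# studySpec fragment combining the two config blocks documented above;
# metrics and parameters are omitted for brevity.
study_spec_fragment = {
    "studyStoppingConfig": {
        "maxNumTrials": 50,
        "maximumRuntimeConstraint": {"maxDuration": "7200s"},  # 2 hours
    },
    "transferLearningConfig": {
        "disableTransferLearning": True,  # opt out of Vizier's automatic choice
    },
}
```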

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -491,6 +506,21 @@

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -508,7 +538,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -777,6 +807,21 @@

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -794,7 +839,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1034,6 +1079,21 @@

"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -1051,7 +1111,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html index 23aa38fcc74..79c50f7623a 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.nasJobs.html @@ -221,7 +221,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -300,7 +300,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -465,7 +465,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -544,7 +544,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -751,7 +751,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -830,7 +830,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1008,7 +1008,7 @@

"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], @@ -1087,7 +1087,7 @@


"experimentRun": "A String", # Optional. The Experiment Run associated with this job. Format: `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` "network": "A String", # Optional. The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Job should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. To specify this field, you must have already [configured VPC Network Peering for Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If this field is left unspecified, the job is not peered with any network. "persistentResourceId": "A String", # Optional. The ID of the PersistentResource in the same Project and Location which to run If this is specified, the job will be run on existing machines held by the PersistentResource instead of on-demand short-live machines. The network and CMEK configs on the job should be consistent with those on the PersistentResource, otherwise, the job will be rejected. - "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations + "protectedArtifactLocationId": "A String", # The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations "reservedIpRanges": [ # Optional. A list of names for the reserved ip ranges under the VPC network that can be used for this job. If set, we will deploy the job within the provided ip ranges. Otherwise, the job will be deployed to any ip ranges under the provided VPC network. Example: ['vertex-ai-ip-range']. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html b/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html index 5d16f7d2192..806c85e918c 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.studies.html @@ -120,7 +120,7 @@


body: object, The request body. The object takes the form of: -{ # A message representing a Study. +{ # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -209,6 +209,21 @@


"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -226,7 +241,7 @@
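
For reference, a hedged sketch of creating a Vizier Study that sets the new studySpec.studyStoppingConfig block through the v1beta1 client; the metric, parameter, and budget values are illustrative only.

from googleapiclient.discovery import build

aiplatform = build("aiplatform", "v1beta1")

parent = "projects/my-project/locations/us-central1"  # placeholder
study = {
    "displayName": "stopping-config-demo",
    "studySpec": {
        "metrics": [{"metricId": "accuracy", "goal": "MAXIMIZE"}],
        "parameters": [{
            "parameterId": "learning_rate",
            "doubleValueSpec": {"minValue": 0.0001, "maxValue": 0.1},
        }],
        "studyStoppingConfig": {
            "minNumTrials": 5,                  # never stop before 5 COMPLETED trials
            "maxNumTrials": 100,                # hard trial budget
            "maxNumTrialsNoProgress": 20,       # convergence check, single-objective studies only
            "maximumRuntimeConstraint": {"maxDuration": "86400s"},  # one-day wallclock budget
            "shouldStopAsap": True,             # interrupt running trials once a condition is met
        },
    },
}

created = aiplatform.projects().locations().studies().create(parent=parent, body=study).execute()
print(created["name"], created.get("state"))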


Returns: An object of the form: - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -315,6 +330,21 @@


"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -357,7 +387,7 @@


Returns: An object of the form: - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -446,6 +476,21 @@


"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -475,7 +520,7 @@


{ # Response message for VizierService.ListStudies. "nextPageToken": "A String", # Passes this token as the `page_token` field of the request for a subsequent call. If this field is omitted, there are no subsequent pages. "studies": [ # The studies associated with the project. - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -564,6 +609,21 @@
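
For reference, an illustrative way to page through VizierService.ListStudies with the generated list/list_next helpers; the parent value is a placeholder.

from googleapiclient.discovery import build

aiplatform = build("aiplatform", "v1beta1")
studies = aiplatform.projects().locations().studies()

request = studies.list(parent="projects/my-project/locations/us-central1")
while request is not None:
    response = request.execute()
    for study in response.get("studies", []):
        print(study["name"], study.get("displayName"))
    # list_next returns None once nextPageToken is absent
    request = studies.list_next(previous_request=request, previous_response=response)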


"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies @@ -611,7 +671,7 @@


Returns: An object of the form: - { # A message representing a Study. + { # A message representing a Study. Next id: 12 "createTime": "A String", # Output only. Time at which the study was created. "displayName": "A String", # Required. Describes the Study, default value is empty string. "inactiveReason": "A String", # Output only. A human readable reason why the Study is inactive. This should be empty if a study is ACTIVE or COMPLETED. @@ -700,6 +760,21 @@


"scaleType": "A String", # How the parameter should be scaled. Leave unset for `CATEGORICAL` parameters. }, ], + "studyStoppingConfig": { # The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection. # Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition. + "maxDurationNoProgress": "A String", # If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies. + "maxNumTrials": 42, # If there are more than this many trials, stop the study. + "maxNumTrialsNoProgress": 42, # If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies. + "maximumRuntimeConstraint": { # Time-based Constraint for Study # If the specified time or duration has passed, stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "minNumTrials": 42, # If there are fewer than this many COMPLETED trials, do not stop the study. + "minimumRuntimeConstraint": { # Time-based Constraint for Study # Each "stopping rule" in this proto specifies an "if" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose "if" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study. + "endTime": "A String", # Compares the wallclock time to this time. Must use UTC timezone. + "maxDuration": "A String", # Counts the wallclock time passed since the creation of this Study. + }, + "shouldStopAsap": True or False, # If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth). + }, "transferLearningConfig": { # This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here. # The configuration info/options for transfer learning. 
Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob "disableTransferLearning": True or False, # Flag to to manually prevent vizier from using transfer learning on a new study. Otherwise, vizier will automatically determine whether or not to use transfer learning. "priorStudyNames": [ # Output only. Names of previously completed studies diff --git a/docs/dyn/analyticsadmin_v1alpha.accounts.html b/docs/dyn/analyticsadmin_v1alpha.accounts.html index 21483195863..abf97359cf5 100644 --- a/docs/dyn/analyticsadmin_v1alpha.accounts.html +++ b/docs/dyn/analyticsadmin_v1alpha.accounts.html @@ -741,6 +741,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -1204,6 +1208,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} diff --git a/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html b/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html index 4bf550b5f19..e34d131b8e1 100644 --- a/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html +++ b/docs/dyn/analyticsadmin_v1alpha.properties.conversionEvents.html @@ -114,6 +114,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -131,6 +135,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -173,6 +181,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -201,6 +213,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -237,6 +253,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -255,6 +275,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} diff --git a/docs/dyn/analyticsadmin_v1beta.accounts.html b/docs/dyn/analyticsadmin_v1beta.accounts.html index 4757aa39c1c..ebff525d555 100644 --- a/docs/dyn/analyticsadmin_v1beta.accounts.html +++ b/docs/dyn/analyticsadmin_v1beta.accounts.html @@ -543,6 +543,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -620,6 +624,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} diff --git a/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html b/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html index 07a7093cf8e..95a9689dce9 100644 --- a/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html +++ b/docs/dyn/analyticsadmin_v1beta.properties.conversionEvents.html @@ -114,6 +114,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -131,6 +135,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -173,6 +181,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -201,6 +213,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -237,6 +253,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} @@ -255,6 +275,10 @@


"countingMethod": "A String", # Optional. The method by which conversions will be counted across multiple events within a session. If this value is not provided, it will be set to `ONCE_PER_EVENT`. "createTime": "A String", # Output only. Time when this conversion event was created in the property. "custom": True or False, # Output only. If set to true, this conversion event refers to a custom event. If set to false, this conversion event refers to a default event in GA. Default events typically have special meaning in GA. Default events are usually created for you by the GA system, but in some cases can be created by property admins. Custom events count towards the maximum number of custom conversion events that may be created per property. + "defaultConversionValue": { # Defines a default value/currency for a conversion event. Both value and currency must be provided. # Optional. Defines a default value/currency for a conversion event. + "currencyCode": "A String", # When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. See https://en.wikipedia.org/wiki/ISO_4217 for more. + "value": 3.14, # This value will be used to populate the value for all conversions of the specified event_name where the event "value" parameter is unset. + }, "deletable": True or False, # Output only. If set, this event can currently be deleted with DeleteConversionEvent. "eventName": "A String", # Immutable. The event name for this conversion event. Examples: 'click', 'purchase' "name": "A String", # Output only. Resource name of this conversion event. Format: properties/{property}/conversionEvents/{conversion_event} diff --git a/docs/dyn/androiddeviceprovisioning_v1.partners.devices.html b/docs/dyn/androiddeviceprovisioning_v1.partners.devices.html index 56cdeee6c5f..74e9e196d2f 100644 --- a/docs/dyn/androiddeviceprovisioning_v1.partners.devices.html +++ b/docs/dyn/androiddeviceprovisioning_v1.partners.devices.html @@ -124,6 +124,7 @@


The object takes the form of: { # Request message to claim a device on behalf of a customer. + "configurationId": "A String", # Optional. The unique identifier of the configuration (internally known as profile) to set for the section. "customerId": "A String", # The ID of the customer for whom the device is being claimed. "deviceIdentifier": { # Encapsulates hardware and product IDs to identify a manufactured device. To understand requirements on identifier sets, read [Identifiers](https://developers.google.com/zero-touch/guides/identifiers). # Required. Required. The device identifier of the device to claim. "chromeOsAttestedDeviceId": "A String", # An identifier provided by OEMs, carried through the production and sales process. Only applicable to Chrome OS devices. @@ -142,7 +143,7 @@


"googleWorkspaceCustomerId": "A String", # The Google Workspace customer ID. "preProvisioningToken": "A String", # Optional. Must and can only be set for Chrome OS devices. "sectionType": "A String", # Required. The section type of the device's provisioning record. - "simlockProfileId": "A String", # Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile. + "simlockProfileId": "A String", # Optional. } x__xgafv: string, V1 error format. @@ -171,6 +172,7 @@


{ # Request to claim devices asynchronously in batch. Claiming a device adds the device to zero-touch enrollment and shows the device in the customer's view of the portal. "claims": [ # Required. A list of device claims. { # Identifies one claim request. + "configurationId": "A String", # Optional. The unique identifier of the configuration (internally known as profile) to set for the section. "customerId": "A String", # The ID of the customer for whom the device is being claimed. "deviceIdentifier": { # Encapsulates hardware and product IDs to identify a manufactured device. To understand requirements on identifier sets, read [Identifiers](https://developers.google.com/zero-touch/guides/identifiers). # Required. Required. Device identifier of the device. "chromeOsAttestedDeviceId": "A String", # An identifier provided by OEMs, carried through the production and sales process. Only applicable to Chrome OS devices. @@ -189,7 +191,7 @@


"googleWorkspaceCustomerId": "A String", # The Google Workspace customer ID. "preProvisioningToken": "A String", # Optional. Must and can only be set for Chrome OS devices. "sectionType": "A String", # Required. The section type of the device's provisioning record. - "simlockProfileId": "A String", # Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile. + "simlockProfileId": "A String", # Optional. }, ], } diff --git a/docs/dyn/androidmanagement_v1.enterprises.devices.html b/docs/dyn/androidmanagement_v1.enterprises.devices.html index b3a0f55670b..4c7e5f7530d 100644 --- a/docs/dyn/androidmanagement_v1.enterprises.devices.html +++ b/docs/dyn/androidmanagement_v1.enterprises.devices.html @@ -415,7 +415,7 @@


"createTime": "A String", # The timestamp at which the command was created. The timestamp is automatically generated by the server. "duration": "A String", # The duration for which the command is valid. The command will expire if not executed by the device during this time. The default duration if unspecified is ten minutes. There is no maximum duration. "errorCode": "A String", # If the command failed, an error code explaining the failure. This is not set when the command is cancelled by the caller. - "newPassword": "A String", # For commands of type RESET_PASSWORD, optionally specifies the new password. + "newPassword": "A String", # For commands of type RESET_PASSWORD, optionally specifies the new password. Note: The new password must be at least 6 characters long if it is numeric in case of Android 14 devices. Else the command will fail with INVALID_VALUE. "resetPasswordFlags": [ # For commands of type RESET_PASSWORD, optionally specifies flags. "A String", ], diff --git a/docs/dyn/appengine_v1.apps.services.versions.html b/docs/dyn/appengine_v1.apps.services.versions.html index 9cd1e1e8a87..23a56973ab6 100644 --- a/docs/dyn/appengine_v1.apps.services.versions.html +++ b/docs/dyn/appengine_v1.apps.services.versions.html @@ -219,6 +219,9 @@

Method Details

"operatingSystem": "A String", # The operating system of the application runtime. "runtimeVersion": "A String", # The runtime version of an App Engine flexible application. }, + "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, "handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set. { # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript. "apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests. @@ -518,6 +521,9 @@

Method Details

"operatingSystem": "A String", # The operating system of the application runtime. "runtimeVersion": "A String", # The runtime version of an App Engine flexible application. }, + "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, "handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set. { # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript. "apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests. @@ -756,6 +762,9 @@

Method Details

"operatingSystem": "A String", # The operating system of the application runtime. "runtimeVersion": "A String", # The runtime version of an App Engine flexible application. }, + "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, "handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set. { # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript. "apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests. @@ -997,6 +1006,9 @@

Method Details

"operatingSystem": "A String", # The operating system of the application runtime. "runtimeVersion": "A String", # The runtime version of an App Engine flexible application. }, + "generatedCustomerMetadata": { # Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, "handlers": [ # An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set. { # URL pattern and description of how the URL should be handled. App Engine can handle URLs by executing application code or by serving static files uploaded with the version, such as images, CSS, or JavaScript. "apiEndpoint": { # Uses Google Cloud Endpoints to handle requests. # Uses API Endpoints to handle requests. diff --git a/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html b/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html index 74c76c8bdec..a5c08e1f1c0 100644 --- a/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html +++ b/docs/dyn/assuredworkloads_v1.organizations.locations.workloads.html @@ -203,12 +203,12 @@

Method Details

"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -324,12 +324,12 @@

Method Details

"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -403,12 +403,12 @@

Method Details

"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -505,12 +505,12 @@

Method Details

"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -573,12 +573,12 @@

Method Details

"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -640,12 +640,12 @@

Method Details

"acknowledgedViolationCount": 42, # Number of current orgPolicy violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. diff --git a/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html b/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html index 5ff267199ba..dcfa8580cc0 100644 --- a/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html +++ b/docs/dyn/assuredworkloads_v1beta1.organizations.locations.workloads.html @@ -211,12 +211,12 @@

Method Details

"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -376,12 +376,12 @@

Method Details

"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -481,12 +481,12 @@

Method Details

"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -591,12 +591,12 @@

Method Details

"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. @@ -684,12 +684,12 @@

Method Details

"activeResourceViolationCount": 42, # Number of current resource violations which are acknowledged. "activeViolationCount": 42, # Number of current orgPolicy violations which are not acknowledged. }, - "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment." + "compliantButDisallowedServices": [ # Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment. "A String", ], "createTime": "A String", # Output only. Immutable. The Workload creation timestamp. "displayName": "A String", # Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload - "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Optional. Represents the Ekm Provisioning State of the given workload. + "ekmProvisioningResponse": { # External key management systems(EKM) Provisioning response # Output only. Represents the Ekm Provisioning State of the given workload. "ekmProvisioningErrorDomain": "A String", # Indicates Ekm provisioning error if any. "ekmProvisioningErrorMapping": "A String", # Detailed error message if Ekm provisioning fails "ekmProvisioningState": "A String", # Indicates Ekm enrollment Provisioning of a given workload. diff --git a/docs/dyn/batch_v1.projects.locations.state.html b/docs/dyn/batch_v1.projects.locations.state.html index 4453c46f019..f2e91a8f705 100644 --- a/docs/dyn/batch_v1.projects.locations.state.html +++ b/docs/dyn/batch_v1.projects.locations.state.html @@ -320,6 +320,7 @@

Method Details

"taskSource": "A String", # TaskSource represents the source of the task. }, ], + "useBatchMonitoredResource": True or False, # If true, the cloud logging for batch agent will use batch.googleapis.com/Job as monitored resource for Batch job related logging. } diff --git a/docs/dyn/biglake_v1.html b/docs/dyn/biglake_v1.html new file mode 100644 index 00000000000..618319c39c2 --- /dev/null +++ b/docs/dyn/biglake_v1.html @@ -0,0 +1,111 @@ + + + +

BigLake API

+

Instance Methods

+

+ projects() +

+

Returns the projects Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ new_batch_http_request()

+

Create a BatchHttpRequest object based on the discovery document.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ new_batch_http_request() +
Create a BatchHttpRequest object based on the discovery document.
+
+                Args:
+                  callback: callable, A callback to be called for each response, of the
+                    form callback(id, response, exception). The first parameter is the
+                    request id, and the second is the deserialized response object. The
+                    third is an apiclient.errors.HttpError exception object if an HTTP
+                    error occurred while processing the request, or None if no error
+                    occurred.
+
+                Returns:
+                  A BatchHttpRequest object based on the discovery document.
+                
+
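As an illustrative aside (not part of the generated patch), the new BigLake surface above can be reached through the discovery-based client shipped by this library. A minimal sketch, assuming Application Default Credentials are configured in the environment; later sketches in this section reuse this `biglake` object:

    # Build the BigLake v1 client with the discovery-based google-api-python-client.
    from googleapiclient.discovery import build

    biglake = build("biglake", "v1")
    # Resources hang off projects() -> locations() -> catalogs(), as documented above.
    catalogs = biglake.projects().locations().catalogs()
    # Call biglake.close() when finished to release httplib2 connections.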
+ + \ No newline at end of file diff --git a/docs/dyn/biglake_v1.projects.html b/docs/dyn/biglake_v1.projects.html new file mode 100644 index 00000000000..a01b91f2dac --- /dev/null +++ b/docs/dyn/biglake_v1.projects.html @@ -0,0 +1,91 @@ + + + +

BigLake API . projects

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/biglake_v1.projects.locations.catalogs.databases.html b/docs/dyn/biglake_v1.projects.locations.catalogs.databases.html new file mode 100644 index 00000000000..78cb381cbdc --- /dev/null +++ b/docs/dyn/biglake_v1.projects.locations.catalogs.databases.html @@ -0,0 +1,318 @@ + + + +

BigLake API . projects . locations . catalogs . databases

+

Instance Methods

+

+ tables() +

+

Returns the tables Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, databaseId=None, x__xgafv=None)

+

Creates a new database.

+

+ delete(name, x__xgafv=None)

+

Deletes an existing database specified by the database ID.

+

+ get(name, x__xgafv=None)

+

Gets the database specified by the resource name.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List all databases in a specified catalog.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates an existing database specified by the database ID.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, databaseId=None, x__xgafv=None) +
Creates a new database.
+
+Args:
+  parent: string, Required. The parent resource where this database will be created. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Database is the container of tables.
+  "createTime": "A String", # Output only. The creation time of the database.
+  "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+  "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+  "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+    "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+    "parameters": { # Stores user supplied Hive database parameters.
+      "a_key": "A String",
+    },
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+  "type": "A String", # The database type.
+  "updateTime": "A String", # Output only. The last modification time of the database.
+}
+
+  databaseId: string, Required. The ID to use for the database, which will become the final component of the database's resource name.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Database is the container of tables.
+  "createTime": "A String", # Output only. The creation time of the database.
+  "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+  "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+  "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+    "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+    "parameters": { # Stores user supplied Hive database parameters.
+      "a_key": "A String",
+    },
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+  "type": "A String", # The database type.
+  "updateTime": "A String", # Output only. The last modification time of the database.
+}
+
+ +
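For orientation, a hedged sketch of the create() call documented above, continuing the client built in the earlier sketch. The project, location, catalog, and bucket IDs are placeholders, and "HIVE" as the database type is an assumption (the reference only describes the field as "The database type."):

    # Create a BigLake database under an existing catalog (placeholder IDs).
    parent = "projects/my-project/locations/us-central1/catalogs/my-catalog"
    database = (
        biglake.projects()
        .locations()
        .catalogs()
        .databases()
        .create(
            parent=parent,
            databaseId="my_db",
            body={
                "type": "HIVE",  # Assumed value; not stated in this patch.
                "hiveOptions": {
                    "locationUri": "gs://my-bucket/warehouse/my_db",
                    "parameters": {"owner": "data-team"},
                },
            },
        )
        .execute()
    )
    print(database["name"])  # Output-only resource name assigned by the service.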
+ delete(name, x__xgafv=None) +
Deletes an existing database specified by the database ID.
+
+Args:
+  name: string, Required. The name of the database to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Database is the container of tables.
+  "createTime": "A String", # Output only. The creation time of the database.
+  "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+  "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+  "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+    "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+    "parameters": { # Stores user supplied Hive database parameters.
+      "a_key": "A String",
+    },
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+  "type": "A String", # The database type.
+  "updateTime": "A String", # Output only. The last modification time of the database.
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the database specified by the resource name.
+
+Args:
+  name: string, Required. The name of the database to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Database is the container of tables.
+  "createTime": "A String", # Output only. The creation time of the database.
+  "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+  "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+  "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+    "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+    "parameters": { # Stores user supplied Hive database parameters.
+      "a_key": "A String",
+    },
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+  "type": "A String", # The database type.
+  "updateTime": "A String", # Output only. The last modification time of the database.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List all databases in a specified catalog.
+
+Args:
+  parent: string, Required. The parent, which owns this collection of databases. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required)
+  pageSize: integer, The maximum number of databases to return. The service may return fewer than this value. If unspecified, at most 50 databases will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListDatabases` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDatabases` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the ListDatabases method.
+  "databases": [ # The databases from the specified catalog.
+    { # Database is the container of tables.
+      "createTime": "A String", # Output only. The creation time of the database.
+      "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+      "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+      "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+        "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+        "parameters": { # Stores user supplied Hive database parameters.
+          "a_key": "A String",
+        },
+      },
+      "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+      "type": "A String", # The database type.
+      "updateTime": "A String", # Output only. The last modification time of the database.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
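A short sketch of the pagination contract documented above for list()/list_next(), reusing the `biglake` client and the `parent` placeholder from the earlier sketches:

    # Page through every database in the catalog; list_next() returns None
    # once the response carries no nextPageToken, per the schema above.
    databases = biglake.projects().locations().catalogs().databases()
    request = databases.list(parent=parent, pageSize=50)
    while request is not None:
        response = request.execute()
        for db in response.get("databases", []):
            print(db["name"])
        request = databases.list_next(previous_request=request, previous_response=response)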
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Updates an existing database specified by the database ID.
+
+Args:
+  name: string, Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Database is the container of tables.
+  "createTime": "A String", # Output only. The creation time of the database.
+  "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+  "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+  "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+    "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+    "parameters": { # Stores user supplied Hive database parameters.
+      "a_key": "A String",
+    },
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+  "type": "A String", # The database type.
+  "updateTime": "A String", # Output only. The last modification time of the database.
+}
+
+  updateMask: string, The list of fields to update. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to be updated.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Database is the container of tables.
+  "createTime": "A String", # Output only. The creation time of the database.
+  "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted.
+  "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted.
+  "hiveOptions": { # Options of a Hive database. # Options of a Hive database.
+    "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://".
+    "parameters": { # Stores user supplied Hive database parameters.
+      "a_key": "A String",
+    },
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}
+  "type": "A String", # The database type.
+  "updateTime": "A String", # Output only. The last modification time of the database.
+}
+
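A sketch of a partial update with patch() and an explicit updateMask, continuing the earlier sketches; restricting the mask to "hiveOptions.parameters" is an assumption about the accepted FieldMask paths, and the resource name is a placeholder:

    # Update only the Hive parameters of an existing database.
    databases = biglake.projects().locations().catalogs().databases()
    updated = databases.patch(
        name=parent + "/databases/my_db",
        updateMask="hiveOptions.parameters",  # Assumed mask path.
        body={"hiveOptions": {"parameters": {"owner": "analytics-team"}}},
    ).execute()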
+ + \ No newline at end of file diff --git a/docs/dyn/biglake_v1.projects.locations.catalogs.databases.tables.html b/docs/dyn/biglake_v1.projects.locations.catalogs.databases.tables.html new file mode 100644 index 00000000000..a44cb8391d4 --- /dev/null +++ b/docs/dyn/biglake_v1.projects.locations.catalogs.databases.tables.html @@ -0,0 +1,430 @@ + + + +

BigLake API . projects . locations . catalogs . databases . tables

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, tableId=None, x__xgafv=None)

+

Creates a new table.

+

+ delete(name, x__xgafv=None)

+

Deletes an existing table specified by the table ID.

+

+ get(name, x__xgafv=None)

+

Gets the table specified by the resource name.

+

+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)

+

List all tables in a specified database.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Updates an existing table specified by the table ID.

+

+ rename(name, body=None, x__xgafv=None)

+

Renames an existing table specified by the table ID.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, tableId=None, x__xgafv=None) +
Creates a new table.
+
+Args:
+  parent: string, Required. The parent resource where this table will be created. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
+  tableId: string, Required. The ID to use for the table, which will become the final component of the table's resource name.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
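For orientation, a hedged sketch of tables().create() as documented above, reusing the earlier client and placeholders. "EXTERNAL_TABLE" comes from the tableType comment; the "HIVE" type and the Hadoop/Hive class names are illustrative assumptions, not values taken from this patch:

    # Register an external Hive table over Cloud Storage data (placeholder values).
    tables = biglake.projects().locations().catalogs().databases().tables()
    table = tables.create(
        parent=parent + "/databases/my_db",
        tableId="events",
        body={
            "type": "HIVE",  # Assumed value.
            "hiveOptions": {
                "tableType": "EXTERNAL_TABLE",
                "storageDescriptor": {
                    "locationUri": "gs://my-bucket/warehouse/my_db/events",
                    "inputFormat": "org.apache.hadoop.mapred.TextInputFormat",
                    "outputFormat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
                    "serdeInfo": {
                        "serializationLib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
                    },
                },
                "parameters": {"EXTERNAL": "TRUE"},
            },
        },
    ).execute()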
+ +
+ delete(name, x__xgafv=None) +
Deletes an existing table specified by the table ID.
+
+Args:
+  name: string, Required. The name of the table to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the table specified by the resource name.
+
+Args:
+  name: string, Required. The name of the table to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None) +
List all tables in a specified database.
+
+Args:
+  parent: string, Required. The parent, which owns this collection of tables. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required)
+  pageSize: integer, The maximum number of tables to return. The service may return fewer than this value. If unspecified, at most 50 tables will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListTables` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListTables` must match the call that provided the page token.
+  view: string, The view for the returned tables.
+    Allowed values
+      TABLE_VIEW_UNSPECIFIED - Default value. The API will default to the BASIC view.
+      BASIC - Include only table names. This is the default value.
+      FULL - Include everything.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the ListTables method.
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+  "tables": [ # The tables from the specified database.
+    { # Represents a table.
+      "createTime": "A String", # Output only. The creation time of the table.
+      "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+      "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+      "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+      "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+        "parameters": { # Stores user supplied Hive table parameters.
+          "a_key": "A String",
+        },
+        "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+          "inputFormat": "A String", # The fully qualified Java class name of the input format.
+          "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+          "outputFormat": "A String", # The fully qualified Java class name of the output format.
+          "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+            "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+          },
+        },
+        "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+      },
+      "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+      "type": "A String", # The table type.
+      "updateTime": "A String", # Output only. The last modification time of the table.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
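A brief sketch combining the BASIC list view documented above with get() to retrieve full table metadata, continuing the earlier sketches:

    # List table names only (BASIC view), then fetch full metadata per table.
    tables = biglake.projects().locations().catalogs().databases().tables()
    listing = tables.list(parent=parent + "/databases/my_db", view="BASIC").execute()
    for entry in listing.get("tables", []):
        full = tables.get(name=entry["name"]).execute()
        print(full["name"], full.get("hiveOptions", {}).get("tableType"))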
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Updates an existing table specified by the table ID.
+
+Args:
+  name: string, Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
+  updateMask: string, The list of fields to update. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
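+
+Continuing from the tables resource object in the sketch further above, a hedged example of a partial update; the updateMask path and the parameter value are illustrative assumptions, not taken from the reference.
+
+  table_name = ("projects/my-project/locations/us/catalogs/my-catalog"
+                "/databases/my-db/tables/my-table")
+  patch_body = {"hiveOptions": {"parameters": {"owner": "data-platform"}}}
+  # Only the fields named in updateMask are written; all other fields are left as-is.
+  updated = tables.patch(name=table_name,
+                         body=patch_body,
+                         updateMask="hiveOptions.parameters").execute()
+  print(updated["updateTime"])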
+ +
+ rename(name, body=None, x__xgafv=None) +
Renames an existing table specified by the table ID.
+
+Args:
+  name: string, Required. The table's `name` field is used to identify the table to rename. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for the RenameTable method in MetastoreService
+  "newName": "A String", # Required. The new `name` for the specified table, must be in the same database. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a table.
+  "createTime": "A String", # Output only. The creation time of the table.
+  "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted.
+  "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.
+  "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted.
+  "hiveOptions": { # Options of a Hive table. # Options of a Hive table.
+    "parameters": { # Stores user supplied Hive table parameters.
+      "a_key": "A String",
+    },
+    "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data.
+      "inputFormat": "A String", # The fully qualified Java class name of the input format.
+      "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://".
+      "outputFormat": "A String", # The fully qualified Java class name of the output format.
+      "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information.
+        "serializationLib": "A String", # The fully qualified Java class name of the serialization library.
+      },
+    },
+    "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.
+  },
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}
+  "type": "A String", # The table type.
+  "updateTime": "A String", # Output only. The last modification time of the table.
+}
+
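+
+A matching sketch for rename(), reusing the same tables resource object; the request body carries the new name, which must stay within the same database. The IDs are placeholders.
+
+  renamed = tables.rename(
+      name=table_name,
+      body={
+          "newName": ("projects/my-project/locations/us/catalogs/my-catalog"
+                      "/databases/my-db/tables/my-table-v2"),
+      },
+  ).execute()
+  print(renamed["name"])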
+
+
\ No newline at end of file
diff --git a/docs/dyn/biglake_v1.projects.locations.catalogs.html b/docs/dyn/biglake_v1.projects.locations.catalogs.html
new file mode 100644
index 00000000000..5e6fcddfdf5
--- /dev/null
+++ b/docs/dyn/biglake_v1.projects.locations.catalogs.html
@@ -0,0 +1,231 @@
+
+
+
+

BigLake API . projects . locations . catalogs

+

Instance Methods

+

+ databases() +

+

Returns the databases Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, catalogId=None, x__xgafv=None)

+

Creates a new catalog.

+

+ delete(name, x__xgafv=None)

+

Deletes an existing catalog specified by the catalog ID.

+

+ get(name, x__xgafv=None)

+

Gets the catalog specified by the resource name.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List all catalogs in a specified project.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, catalogId=None, x__xgafv=None) +
Creates a new catalog.
+
+Args:
+  parent: string, Required. The parent resource where this catalog will be created. Format: projects/{project_id_or_number}/locations/{location_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Catalog is the container of databases.
+  "createTime": "A String", # Output only. The creation time of the catalog.
+  "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted.
+  "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted.
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}
+  "updateTime": "A String", # Output only. The last modification time of the catalog.
+}
+
+  catalogId: string, Required. The ID to use for the catalog, which will become the final component of the catalog's resource name.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Catalog is the container of databases.
+  "createTime": "A String", # Output only. The creation time of the catalog.
+  "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted.
+  "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted.
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}
+  "updateTime": "A String", # Output only. The last modification time of the catalog.
+}
+
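+
+A minimal sketch of creating a catalog with this method, assuming the service is built as build("biglake", "v1"); the project, location, and catalog IDs are placeholders.
+
+  import google.auth
+  from googleapiclient.discovery import build
+
+  credentials, _ = google.auth.default()
+  biglake = build("biglake", "v1", credentials=credentials)
+  catalogs = biglake.projects().locations().catalogs()
+
+  # Every Catalog field is output only, so an empty body is sufficient;
+  # the ID is supplied separately through catalogId.
+  catalog = catalogs.create(
+      parent="projects/my-project/locations/us",
+      catalogId="my-catalog",
+      body={},
+  ).execute()
+  print(catalog["name"])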
+ +
+ delete(name, x__xgafv=None) +
Deletes an existing catalog specified by the catalog ID.
+
+Args:
+  name: string, Required. The name of the catalog to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Catalog is the container of databases.
+  "createTime": "A String", # Output only. The creation time of the catalog.
+  "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted.
+  "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted.
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}
+  "updateTime": "A String", # Output only. The last modification time of the catalog.
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the catalog specified by the resource name.
+
+Args:
+  name: string, Required. The name of the catalog to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Catalog is the container of databases.
+  "createTime": "A String", # Output only. The creation time of the catalog.
+  "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted.
+  "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted.
+  "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}
+  "updateTime": "A String", # Output only. The last modification time of the catalog.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List all catalogs in a specified project.
+
+Args:
+  parent: string, Required. The parent, which owns this collection of catalogs. Format: projects/{project_id_or_number}/locations/{location_id} (required)
+  pageSize: integer, The maximum number of catalogs to return. The service may return fewer than this value. If unspecified, at most 50 catalogs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, A page token, received from a previous `ListCatalogs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListCatalogs` must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for the ListCatalogs method.
+  "catalogs": [ # The catalogs from the specified project.
+    { # Catalog is the container of databases.
+      "createTime": "A String", # Output only. The creation time of the catalog.
+      "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted.
+      "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted.
+      "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}
+      "updateTime": "A String", # Output only. The last modification time of the catalog.
+    },
+  ],
+  "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
+}
+
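+
+A short paging sketch, reusing the catalogs resource object from the create() example above together with the list_next() helper documented just below; the parent is a placeholder.
+
+  request = catalogs.list(parent="projects/my-project/locations/us", pageSize=50)
+  while request is not None:
+      response = request.execute()
+      for catalog in response.get("catalogs", []):
+          print(catalog["name"])
+      request = catalogs.list_next(previous_request=request, previous_response=response)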
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/biglake_v1.projects.locations.html b/docs/dyn/biglake_v1.projects.locations.html
new file mode 100644
index 00000000000..8e5411a331b
--- /dev/null
+++ b/docs/dyn/biglake_v1.projects.locations.html
@@ -0,0 +1,91 @@
+
+
+
+

BigLake API . projects . locations

+

Instance Methods

+

+ catalogs() +

+

Returns the catalogs Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/cloudbilling_v1.billingAccounts.html b/docs/dyn/cloudbilling_v1.billingAccounts.html
index 9ef9f0c1953..51224703cce 100644
--- a/docs/dyn/cloudbilling_v1.billingAccounts.html
+++ b/docs/dyn/cloudbilling_v1.billingAccounts.html
@@ -79,11 +79,16 @@

Instance Methods

Returns the projects Resource.

+

+ subAccounts() +

+

Returns the subAccounts Resource.

+

close()

Close httplib2 connections.

- create(body=None, x__xgafv=None)

+ create(body=None, parent=None, x__xgafv=None)

This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.

get(name, x__xgafv=None)

@@ -92,11 +97,14 @@

Instance Methods

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a billing account. The caller must have the `billing.accounts.getIamPolicy` permission on the account, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).

- list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+ list(filter=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)

Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).

list_next()

Retrieves the next page of results.

+

+ move(name, body=None, x__xgafv=None)

+

Changes which parent organization a billing account belongs to.

patch(name, body=None, updateMask=None, x__xgafv=None)

Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.

@@ -113,7 +121,7 @@

Method Details

- create(body=None, x__xgafv=None)
+ create(body=None, parent=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
 
 Args:
@@ -127,6 +135,7 @@ 

Method Details

"open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. } + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -214,13 +223,14 @@

Method Details

- list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+ list(filter=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
 
 Args:
   filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported.
   pageSize: integer, Requested page size. The maximum page size is 100; this is also the default.
   pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.
+  parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -256,6 +266,35 @@ 

Method Details

+
+ move(name, body=None, x__xgafv=None) +
Changes which parent organization a billing account belongs to.
+
+Args:
+  name: string, Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for `MoveBillingAccount` RPC.
+  "destinationParent": "A String", # Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
+
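+
+A minimal sketch of this call with the google-api-python-client, assuming the service is built as build("cloudbilling", "v1"); the billing account and organization IDs are placeholders.
+
+  import google.auth
+  from googleapiclient.discovery import build
+
+  credentials, _ = google.auth.default()
+  billing = build("cloudbilling", "v1", credentials=credentials)
+
+  moved = billing.billingAccounts().move(
+      name="billingAccounts/012345-567890-ABCDEF",
+      body={"destinationParent": "organizations/12345678"},
+  ).execute()
+  print(moved["name"], moved["displayName"])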
+
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.
diff --git a/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html
new file mode 100644
index 00000000000..b71c2d190ee
--- /dev/null
+++ b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html
@@ -0,0 +1,171 @@
+
+
+
+

Cloud Billing API . billingAccounts . subAccounts

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.

+

+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
+
+Args:
+  parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
+
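+
+Reusing the billing service object from the move() sketch earlier in this change, a hedged example of creating a subaccount under a parent billing account; the parent and display name are placeholders.
+
+  sub = billing.billingAccounts().subAccounts().create(
+      parent="billingAccounts/012345-567890-ABCDEF",
+      body={"displayName": "My Reseller Subaccount"},
+  ).execute()
+  # For a subaccount, masterBillingAccount points back at the parent billing account.
+  print(sub["name"], sub.get("masterBillingAccount"))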
+ +
+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
+
+Args:
+  parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
+  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported.
+  pageSize: integer, Requested page size. The maximum page size is 100; this is also the default.
+  pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for `ListBillingAccounts`.
+  "billingAccounts": [ # A list of billing accounts.
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+      "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+      "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+      "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+      "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+    },
+  ],
+  "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve.
+}
+
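+
+A short paging sketch for this method, again reusing the billing service object and pairing list() with the list_next() helper documented just below; the parent is a placeholder.
+
+  sub_accounts = billing.billingAccounts().subAccounts()
+  request = sub_accounts.list(parent="billingAccounts/012345-567890-ABCDEF", pageSize=100)
+  while request is not None:
+      response = request.execute()
+      for account in response.get("billingAccounts", []):
+          print(account["name"], account["open"])
+      request = sub_accounts.list_next(previous_request=request, previous_response=response)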
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/cloudbilling_v1.html b/docs/dyn/cloudbilling_v1.html
index 1d4da4cfcfe..a8b7320e249 100644
--- a/docs/dyn/cloudbilling_v1.html
+++ b/docs/dyn/cloudbilling_v1.html
@@ -79,6 +79,11 @@

Instance Methods

Returns the billingAccounts Resource.

+

+ organizations() +

+

Returns the organizations Resource.

+

projects()

diff --git a/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html
new file mode 100644
index 00000000000..0a77281241d
--- /dev/null
+++ b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html
@@ -0,0 +1,197 @@
+
+
+
+

Cloud Billing API . organizations . billingAccounts

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.

+

+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).

+

+ list_next()

+

Retrieves the next page of results.

+

+ move(destinationParent, name, x__xgafv=None)

+

Changes which parent organization a billing account belongs to.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, x__xgafv=None) +
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
+
+Args:
+  parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
+
+ +
+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
+
+Args:
+  parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` (required)
+  filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported.
+  pageSize: integer, Requested page size. The maximum page size is 100; this is also the default.
+  pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for `ListBillingAccounts`.
+  "billingAccounts": [ # A list of billing accounts.
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+      "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+      "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+      "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+      "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+    },
+  ],
+  "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ move(destinationParent, name, x__xgafv=None) +
Changes which parent organization a billing account belongs to.
+
+Args:
+  destinationParent: string, Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`. (required)
+  name: string, Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects.
+  "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
+  "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
+  "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
+  "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
+}
+
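+
+A minimal sketch of this variant, which takes the destination organization and the billing account as request parameters rather than a request body; it assumes the service is built as build("cloudbilling", "v1"), and all IDs are placeholders.
+
+  import google.auth
+  from googleapiclient.discovery import build
+
+  credentials, _ = google.auth.default()
+  billing = build("cloudbilling", "v1", credentials=credentials)
+
+  moved = billing.organizations().billingAccounts().move(
+      destinationParent="organizations/12345678",
+      name="billingAccounts/012345-567890-ABCDEF",
+  ).execute()
+  print(moved["name"])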
+
+
\ No newline at end of file
diff --git a/docs/dyn/cloudbilling_v1.organizations.html b/docs/dyn/cloudbilling_v1.organizations.html
new file mode 100644
index 00000000000..18fdabc9c7c
--- /dev/null
+++ b/docs/dyn/cloudbilling_v1.organizations.html
@@ -0,0 +1,91 @@
+
+
+
+

Cloud Billing API . organizations

+

Instance Methods

+

+ billingAccounts() +

+

Returns the billingAccounts Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/cloudtasks_v2.projects.locations.html b/docs/dyn/cloudtasks_v2.projects.locations.html
index bba06f9b352..fd11c8e9af7 100644
--- a/docs/dyn/cloudtasks_v2.projects.locations.html
+++ b/docs/dyn/cloudtasks_v2.projects.locations.html
@@ -144,7 +144,7 @@

Method Details

Returns:
  An object of the form:
-    { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+    { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
@@ -208,7 +208,7 @@

Method Details

body: object, The request body.
    The object takes the form of:
-{ # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+{ # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
@@ -222,7 +222,7 @@

Method Details

Returns:
  An object of the form:
-    { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+    { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
diff --git a/docs/dyn/cloudtasks_v2.projects.locations.queues.html b/docs/dyn/cloudtasks_v2.projects.locations.queues.html
index 6dcaef26556..5eea45da38f 100644
--- a/docs/dyn/cloudtasks_v2.projects.locations.queues.html
+++ b/docs/dyn/cloudtasks_v2.projects.locations.queues.html
@@ -87,7 +87,7 @@

Instance Methods

Creates a queue. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.

delete(name, x__xgafv=None)

-

Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.

+

Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.

get(name, x__xgafv=None)

Gets a queue.

@@ -198,7 +198,7 @@

Method Details

delete(name, x__xgafv=None) -
Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
+  
Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
 
 Args:
   name: string, Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` (required)
diff --git a/docs/dyn/cloudtasks_v2beta2.projects.locations.html b/docs/dyn/cloudtasks_v2beta2.projects.locations.html
index 4835bd73b56..93dd5dd6bd0 100644
--- a/docs/dyn/cloudtasks_v2beta2.projects.locations.html
+++ b/docs/dyn/cloudtasks_v2beta2.projects.locations.html
@@ -144,7 +144,7 @@ 

Method Details

Returns:
  An object of the form:
-    { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+    { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
@@ -208,7 +208,7 @@

Method Details

body: object, The request body.
    The object takes the form of:
-{ # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+{ # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
@@ -222,7 +222,7 @@

Method Details

Returns:
  An object of the form:
-    { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+    { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
diff --git a/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html
index f35de98fdd2..26ccc9ddb32 100644
--- a/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html
+++ b/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html
@@ -79,7 +79,7 @@

Instance Methods

Acknowledges a pull task. The worker, that is, the entity that leased this task must call this method to indicate that the work associated with the task has finished. The worker must acknowledge a task within the lease_duration or the lease will expire and the task will become available to be leased again. After the task is acknowledged, it will not be returned by a later LeaseTasks, GetTask, or ListTasks.

buffer(queue, taskId, body=None, x__xgafv=None)

-

Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).

+

Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.

cancelLease(name, body=None, x__xgafv=None)

Cancel a pull task's lease. The worker can use this method to cancel a task's lease by setting its schedule_time to now. This will make the task available to be leased to the next caller of LeaseTasks.

@@ -138,7 +138,7 @@

Method Details

buffer(queue, taskId, body=None, x__xgafv=None) -
Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).
+  
Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.
 
 Args:
   queue: string, Required. The parent queue name. For example: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The queue must already exist. (required)
diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.html
index fe3ecf906ff..ff79a5b942d 100644
--- a/docs/dyn/cloudtasks_v2beta3.projects.locations.html
+++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.html
@@ -144,7 +144,7 @@ 

Method Details

Returns:
  An object of the form:
-    { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+    { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
@@ -208,7 +208,7 @@

Method Details

body: object, The request body.
    The object takes the form of:
-{ # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+{ # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
@@ -222,7 +222,7 @@

Method Details

Returns:
  An object of the form:
-    { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands.
+    { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.
  "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption.
  "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig`
}
diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html
index 450073374fc..8d4289fdc6a 100644
--- a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html
+++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html
@@ -87,7 +87,7 @@

Instance Methods

Creates a queue. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.

delete(name, x__xgafv=None)

-

Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.

+

Deletes a queue. This command will delete the queue even if it has tasks in it. Note : If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.

get(name, readMask=None, x__xgafv=None)

Gets a queue.

@@ -284,7 +284,7 @@

Method Details

delete(name, x__xgafv=None) -
Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
+  
Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns a 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue was not successfully recreated. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
 
 Args:
   name: string, Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` (required)
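To illustrate the delete-then-recreate guidance above, a short sketch with the discovery-based Python client: delete the queue, later attempt to recreate one with the same name inside the tombstone window, then confirm with get() that the creation actually succeeded. All resource names are placeholders:

from googleapiclient.discovery import build
from googleapiclient.errors import HttpError

service = build("cloudtasks", "v2beta3")
parent = "projects/PROJECT_ID/locations/LOCATION_ID"
queue_name = parent + "/queues/QUEUE_ID"

# Delete the queue; this deletes the queue even if it still contains tasks.
service.projects().locations().queues().delete(name=queue_name).execute()

# Later: attempt to recreate a queue with the same name, then verify with
# get() that the create actually took effect (see the tombstone-window note).
service.projects().locations().queues().create(
    parent=parent, body={"name": queue_name}
).execute()
try:
    queue = service.projects().locations().queues().get(name=queue_name).execute()
    print("Queue exists:", queue["name"])
except HttpError as err:
    print("Queue was not recreated:", err)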
diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html
index 7eaa3977830..f465c41b0bd 100644
--- a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html
+++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html
@@ -76,7 +76,7 @@ 

Cloud Tasks API . buffer(queue, taskId, body=None, x__xgafv=None)

-

Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).

+

Creates and buffers a new task without the need to explicitly define a Task message. The queue must have an HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.

close()

Close httplib2 connections.

@@ -101,7 +101,7 @@

Instance Methods

Method Details

buffer(queue, taskId, body=None, x__xgafv=None) -
Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).
+  
Creates and buffers a new task without the need to explicitly define a Task message. The queue must have an HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.
 
 Args:
  queue: string, Required. The parent queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The queue must already exist. (required)
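As a rough illustration of the buffer call described above (a sketch only; the exact shape of the BufferTaskRequest body and response is not spelled out here), with placeholder names:

from googleapiclient.discovery import build

service = build("cloudtasks", "v2beta3")
queue = "projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID"

# Buffer a task with a custom ID on a queue that has an HTTP target; an
# empty body asks the service to create the task with default settings.
response = service.projects().locations().queues().tasks().buffer(
    queue=queue,
    taskId="TASK_ID",
    body={},
).execute()

# The response is expected to wrap the created Task resource.
print(response.get("task", {}).get("name"))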
diff --git a/docs/dyn/compute_alpha.backendServices.html b/docs/dyn/compute_alpha.backendServices.html
index 893ad98a1d7..88e12120e2f 100644
--- a/docs/dyn/compute_alpha.backendServices.html
+++ b/docs/dyn/compute_alpha.backendServices.html
@@ -418,7 +418,7 @@ 

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1162,7 +1162,7 @@
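For context, a hedged sketch of how the ipAddressSelectionPolicy field above might be set on an existing global backend service with the compute alpha client; get() is called first so the patch can carry the current fingerprint (assumed to be required for concurrency control), and all names are placeholders:

from googleapiclient.discovery import build

compute = build("compute", "alpha")
project = "PROJECT_ID"
backend_service = "BACKEND_SERVICE_NAME"

# Fetch the current backend service so the patch can include its fingerprint.
bs = compute.backendServices().get(
    project=project, backendService=backend_service
).execute()

# Prefer each endpoint's IPv6 address when a healthy one exists, falling
# back to IPv4 otherwise (PREFER_IPV6); IPV4_ONLY is the default.
operation = compute.backendServices().patch(
    project=project,
    backendService=backend_service,
    body={
        "fingerprint": bs["fingerprint"],
        "ipAddressSelectionPolicy": "PREFER_IPV6",
    },
).execute()
print(operation["name"], operation["status"])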

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1748,7 +1748,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2316,7 +2316,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2782,7 +2782,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -3264,7 +3264,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -4324,7 +4324,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. diff --git a/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html b/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html index 7fa514bb510..31c566477c9 100644 --- a/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html +++ b/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html @@ -127,7 +127,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -402,7 +401,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -920,7 +918,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/compute_alpha.instances.html b/docs/dyn/compute_alpha.instances.html index 586ad69f55c..a76ab2114c2 100644 --- a/docs/dyn/compute_alpha.instances.html +++ b/docs/dyn/compute_alpha.instances.html @@ -101,6 +101,9 @@

Instance Methods

deleteAccessConfig(project, zone, instance, accessConfig, networkInterface, requestId=None, x__xgafv=None)

Deletes an access config from an instance's network interface.

+

+ deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)

+

Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - the instance from which to delete, using the project, zone, and resource_id fields; - the network interface to be deleted, using the network_interface_name field. Only VLAN interface deletion is supported for now.

detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)

Detaches a disk from an instance.

@@ -821,6 +824,9 @@

Method Details

"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -1956,6 +1962,133 @@

Method Details

}
+
+ deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None) +
Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - the instance from which to delete, using the project, zone, and resource_id fields; - the network interface to be deleted, using the network_interface_name field. Only VLAN interface deletion is supported for now.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  instance: string, The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer. (required)
+  networkInterfaceName: string, The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported. (required)
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if the original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request. Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339 text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation, this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details. The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details. Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error. Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {"instanceLimit": "100/request"}, it should be returned as {"instanceLimitPerRequest": "100"} if the client exceeds the number of instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested. This value is in RFC3339 text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an error.
+        "status": "A String", # [Output Only] Creation status of the BulkInsert operation: whether the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # [Output Only] Type of the resource. Always `compute#operation` for Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # [Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id.
+  "setCommonInstanceMetadataOperationMetadata": { # [Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key). Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # [Output Only] If state is `ABANDONED` or `FAILED`, this field is populated.
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations.
+}
+
+
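A small usage sketch for the deleteNetworkInterface method documented above: the call is asynchronous and returns the zonal Operation shown, which is polled here with zoneOperations().wait(). All names are placeholders, and only VLAN interfaces can be deleted:

from googleapiclient.discovery import build

compute = build("compute", "alpha")
project, zone = "PROJECT_ID", "ZONE"

# Remove a VLAN network interface from a running instance.
operation = compute.instances().deleteNetworkInterface(
    project=project,
    zone=zone,
    instance="INSTANCE_NAME",
    networkInterfaceName="VLAN_INTERFACE_NAME",
).execute()

# Poll the zonal Operation until it reaches DONE (wait() blocks for up to
# two minutes per call, so loop in case it returns early).
while operation.get("status") != "DONE":
    operation = compute.zoneOperations().wait(
        project=project, zone=zone, operation=operation["name"]
    ).execute()
print("deleteNetworkInterface finished:", operation.get("status"))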
detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)
Detaches a disk from an instance.
@@ -2373,6 +2506,9 @@ 

Method Details

"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -3387,6 +3523,9 @@

Method Details

"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -3923,6 +4062,9 @@

Method Details

"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -7913,6 +8055,9 @@

Method Details

"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. diff --git a/docs/dyn/compute_alpha.networkEndpointGroups.html b/docs/dyn/compute_alpha.networkEndpointGroups.html index 25a04229089..452f84d77cf 100644 --- a/docs/dyn/compute_alpha.networkEndpointGroups.html +++ b/docs/dyn/compute_alpha.networkEndpointGroups.html @@ -262,7 +262,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -539,7 +538,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -1032,7 +1030,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], @@ -1084,7 +1081,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/compute_alpha.regionBackendServices.html b/docs/dyn/compute_alpha.regionBackendServices.html index 04515df5d88..7830bd20995 100644 --- a/docs/dyn/compute_alpha.regionBackendServices.html +++ b/docs/dyn/compute_alpha.regionBackendServices.html @@ -391,7 +391,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -980,7 +980,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1549,7 +1549,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2016,7 +2016,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2499,7 +2499,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -3431,7 +3431,7 @@

Method Details

"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. diff --git a/docs/dyn/compute_alpha.regionCommitments.html b/docs/dyn/compute_alpha.regionCommitments.html index 0d7a28a0351..c3630aa3808 100644 --- a/docs/dyn/compute_alpha.regionCommitments.html +++ b/docs/dyn/compute_alpha.regionCommitments.html @@ -143,6 +143,9 @@
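The reworded `ipAddressSelectionPolicy` description above belongs to regional backend services. A sketch of setting the field via `regionBackendServices().patch`, assuming an existing backend service; the names are placeholders and `PREFER_IPV6` is just one of the documented values.

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

# Prefer the backends' IPv6 addresses whenever a healthy IPv6 endpoint exists.
op = compute.regionBackendServices().patch(
    project="my-project",         # placeholder
    region="us-central1",         # placeholder
    backendService="my-backend",  # placeholder
    body={"ipAddressSelectionPolicy": "PREFER_IPV6"},
).execute()
print(op.get("status"))
```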

Method Details

"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -607,6 +610,9 @@

Method Details

"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -764,6 +770,9 @@

Method Details

"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -1051,6 +1060,9 @@

Method Details

"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -1270,6 +1282,9 @@

Method Details

"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. diff --git a/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html b/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html index 0a427d9ef5b..c36081ce1b6 100644 --- a/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html +++ b/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html @@ -128,7 +128,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -405,7 +404,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -927,7 +925,6 @@

Method Details

"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html b/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html index 59c9f2036ce..4f29dff02b0 100644 --- a/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html +++ b/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html @@ -129,6 +129,10 @@

Method Details

"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -234,6 +238,10 @@

Method Details

"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -281,6 +289,10 @@

Method Details

"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -337,6 +349,10 @@

Method Details

"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. diff --git a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html index 5caac806cf1..b1bc66442f4 100644 --- a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html +++ b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html @@ -443,6 +443,7 @@

Method Details

"valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. }, "enrichmentSupported": True or False, # Enrichment Supported. + "eventListenerType": "A String", # The type of the event listener for a specific connector. "isEventingSupported": True or False, # Is Eventing Supported. "registrationDestinationConfig": { # DestinationConfigTemplate defines required destinations supported by the Connector. # Registration host destination config template. "defaultPort": 42, # The default port. @@ -907,6 +908,7 @@

Method Details

"valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. }, "enrichmentSupported": True or False, # Enrichment Supported. + "eventListenerType": "A String", # The type of the event listener for a specific connector. "isEventingSupported": True or False, # Is Eventing Supported. "registrationDestinationConfig": { # DestinationConfigTemplate defines required destinations supported by the Connector. # Registration host destination config template. "defaultPort": 42, # The default port. diff --git a/docs/dyn/content_v2_1.reports.html b/docs/dyn/content_v2_1.reports.html index b1b6dde565e..1f5c2fb248c 100644 --- a/docs/dyn/content_v2_1.reports.html +++ b/docs/dyn/content_v2_1.reports.html @@ -186,10 +186,10 @@

Method Details

"priceInsights": { # Price insights fields requested by the merchant in the query. Field values are only set if the merchant queries `PriceInsightsProductView`. https://support.google.com/merchants/answer/11916926 # Price insights fields requested by the merchant in the query. Field values are only set if the merchant queries `PriceInsightsProductView`. "predictedClicksChangeFraction": 3.14, # The predicted change in clicks as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in clicks. "predictedConversionsChangeFraction": 3.14, # The predicted change in conversions as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in conversions). - "predictedGrossProfitChangeFraction": 3.14, # The predicted change in gross profit as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in gross profit. + "predictedGrossProfitChangeFraction": 3.14, # *Deprecated*: This field is no longer supported and will start returning 0. The predicted change in gross profit as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in gross profit. "predictedImpressionsChangeFraction": 3.14, # The predicted change in impressions as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in impressions. - "predictedMonthlyGrossProfitChangeCurrencyCode": "A String", # The predicted monthly gross profit change currency (ISO 4217 code). - "predictedMonthlyGrossProfitChangeMicros": "A String", # The predicted change in gross profit in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) after introducing the suggested price for a month compared to current active price. + "predictedMonthlyGrossProfitChangeCurrencyCode": "A String", # *Deprecated*: This field is no longer supported and will start returning USD for all requests. The predicted monthly gross profit change currency (ISO 4217 code). + "predictedMonthlyGrossProfitChangeMicros": "A String", # *Deprecated*: This field is no longer supported and will start returning 0. The predicted change in gross profit in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) after introducing the suggested price for a month compared to current active price. "suggestedPriceCurrencyCode": "A String", # The suggested price currency (ISO 4217 code). "suggestedPriceMicros": "A String", # The latest suggested price in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) for the product. }, diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html index a6b67718200..4b32309260c 100644 --- a/docs/dyn/dataflow_v1b3.projects.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.jobs.html @@ -450,6 +450,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -803,6 +804,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1140,6 +1142,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1484,6 +1487,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1886,6 +1890,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2280,6 +2285,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2611,6 +2617,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html index ff48348ee93..7f83d220197 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html @@ -518,6 +518,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html index 14a22ed084f..8bc7fdb2a57 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html @@ -438,6 +438,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -774,6 +775,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1118,6 +1120,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1628,6 +1631,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2024,6 +2028,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2354,6 +2359,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.templates.html b/docs/dyn/dataflow_v1b3.projects.locations.templates.html index e269bfeeb9d..93ebcf2de23 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.templates.html @@ -435,6 +435,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -913,6 +914,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html index 5b87b4c0768..a72f84cdf09 100644 --- a/docs/dyn/dataflow_v1b3.projects.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.templates.html @@ -434,6 +434,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -912,6 +913,7 @@

Method Details

"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.html index 87013fad0f2..21665fad897 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.html @@ -212,7 +212,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -531,7 +531,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -834,7 +834,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -1098,7 +1098,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -1379,7 +1379,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html index c13e15933e2..42afdd80614 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html @@ -191,7 +191,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -423,7 +423,7 @@

Method Details

"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html index 5b6849248c0..89ec5d48ffb 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.html @@ -167,10 +167,14 @@

Method Details

}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -356,10 +360,14 @@

Method Details

}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -503,10 +511,14 @@

Method Details

}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.html b/docs/dyn/dataproc_v1.projects.locations.sessions.html index e210b0d9d25..a6a74d771d6 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.html @@ -155,10 +155,14 @@

Method Details

}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -312,10 +316,14 @@

Method Details

}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -409,10 +417,14 @@

Method Details

}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html index fad3a29cc5b..90cc8926789 100644 --- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html @@ -128,6 +128,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -802,6 +805,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -1503,6 +1509,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -2268,6 +2277,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -2983,6 +2995,9 @@
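A hedged sketch of creating a workflow template that sets the new optional `encryptionConfig.kmsKey`; the parent, KMS key, and the trimmed placement/job entries are placeholders, and a real template needs a complete managed-cluster configuration.

```python
from googleapiclient.discovery import build

# Placeholders: parent location, a Cloud KMS key, and a deliberately trimmed
# template body (a real template needs a full placement and job configuration).
PARENT = "projects/example-project/locations/us-central1"
KMS_KEY = ("projects/example-project/locations/us-central1"
           "/keyRings/example-ring/cryptoKeys/example-key")

template_body = {
    "id": "example-template",
    "encryptionConfig": {"kmsKey": KMS_KEY},  # new optional field
    "placement": {"managedCluster": {"clusterName": "example-cluster", "config": {}}},
    "jobs": [
        {"stepId": "step-1", "sparkJob": {"mainClass": "org.example.Main"}},
    ],
}

dataproc = build("dataproc", "v1")
created = dataproc.projects().locations().workflowTemplates().create(
    parent=PARENT, body=template_body).execute()
print(created.get("name"), created.get("encryptionConfig"))
```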

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -3646,6 +3661,9 @@

Method Details

"version": 42, # Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. }, ], + "unreachable": [ # Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response. + "A String", + ], }
@@ -3763,6 +3781,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -4437,6 +4458,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. diff --git a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html index 59bf07ace4f..84b72fa0a21 100644 --- a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html @@ -128,6 +128,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -802,6 +805,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -1503,6 +1509,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -2268,6 +2277,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -2983,6 +2995,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -3646,6 +3661,9 @@

Method Details

"version": 42, # Optional. Used to perform a consistent read-modify-write.This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request. }, ], + "unreachable": [ # Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response. + "A String", + ], }
@@ -3763,6 +3781,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. @@ -4437,6 +4458,9 @@

Method Details

{ # A Dataproc workflow template resource. "createTime": "A String", # Output only. The time template was created. "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted. + "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content. + "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content. + }, "id": "A String", "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit. { # A job executed by the workflow. diff --git a/docs/dyn/dlp_v2.organizations.deidentifyTemplates.html b/docs/dyn/dlp_v2.organizations.deidentifyTemplates.html index 7cdc2affc0f..eddd5466274 100644 --- a/docs/dyn/dlp_v2.organizations.deidentifyTemplates.html +++ b/docs/dyn/dlp_v2.organizations.deidentifyTemplates.html @@ -2752,9 +2752,9 @@

Method Details

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3628,7 +3628,7 @@

Method Details

"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDeidentifyTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDeidentifyTemplates request. }
diff --git a/docs/dyn/dlp_v2.organizations.inspectTemplates.html b/docs/dyn/dlp_v2.organizations.inspectTemplates.html index 3ba5ff1526a..41dc5ac4e41 100644 --- a/docs/dyn/dlp_v2.organizations.inspectTemplates.html +++ b/docs/dyn/dlp_v2.organizations.inspectTemplates.html @@ -721,9 +721,9 @@

Method Details

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -920,7 +920,7 @@

Method Details

"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request. }
diff --git a/docs/dyn/dlp_v2.organizations.locations.deidentifyTemplates.html b/docs/dyn/dlp_v2.organizations.locations.deidentifyTemplates.html index 09fb8dd08a5..93cb404d3c3 100644 --- a/docs/dyn/dlp_v2.organizations.locations.deidentifyTemplates.html +++ b/docs/dyn/dlp_v2.organizations.locations.deidentifyTemplates.html @@ -2752,9 +2752,9 @@

Method Details

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3628,7 +3628,7 @@

Method Details

"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDeidentifyTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDeidentifyTemplates request. }
diff --git a/docs/dyn/dlp_v2.organizations.locations.discoveryConfigs.html b/docs/dyn/dlp_v2.organizations.locations.discoveryConfigs.html index 7e5773f37cc..6c61751bc76 100644 --- a/docs/dyn/dlp_v2.organizations.locations.discoveryConfigs.html +++ b/docs/dyn/dlp_v2.organizations.locations.discoveryConfigs.html @@ -79,22 +79,22 @@

Instance Methods

Close httplib2 connections.

create(parent, body=None, x__xgafv=None)

- Creates a config for Discovery to scan and profile storage.
+ Creates a config for discovery to scan and profile storage.

delete(name, x__xgafv=None)

- Deletes a Discovery configuration.
+ Deletes a discovery configuration.

get(name, x__xgafv=None)

- Gets a Discovery configuration.
+ Gets a discovery configuration.

list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists Discovery configurations.
+ Lists discovery configurations.

list_next()

Retrieves the next page of results.

patch(name, body=None, x__xgafv=None)

- Updates a Discovery configuration.
+ Updates a discovery configuration.

Method Details

close() @@ -103,7 +103,7 @@

Method Details

create(parent, body=None, x__xgafv=None)
- Creates a config for Discovery to scan and profile storage.
+ Creates a config for discovery to scan and profile storage.
 
 Args:
   parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
@@ -111,8 +111,8 @@ 

Method Details

The object takes the form of: { # Request message for CreateDiscoveryConfig. - "configId": "A String", # The config id can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one. - "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create. + "configId": "A String", # The config ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one. + "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create. "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -158,13 +158,13 @@
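A hedged sketch of creating one of the new discovery configs with a catch-all BigQuery target; the organization ID, project, and field values are placeholders, and the exact set of required fields may differ in practice.

```python
from googleapiclient.discovery import build

# Placeholders throughout: organization ID, project that runs the scan, and a
# catch-all BigQuery filter; the exact required fields may differ in practice.
PARENT = "organizations/123456789/locations/global"

body = {
    "configId": "example-discovery-config",
    "discoveryConfig": {
        "displayName": "example discovery config",
        "status": "RUNNING",
        "orgConfig": {
            "projectId": "example-project",
            "location": {"organizationId": "123456789"},
        },
        "targets": [
            {"bigQueryTarget": {"filter": {"otherTables": {}}}},
        ],
    },
}

dlp = build("dlp", "v2")
created = dlp.organizations().locations().discoveryConfigs().create(
    parent=PARENT, body=body).execute()
print(created.get("name"))
```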

Method Details

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -173,7 +173,7 @@

Method Details

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -188,22 +188,22 @@

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -233,7 +233,7 @@

Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -279,13 +279,13 @@

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -294,7 +294,7 @@

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -309,22 +309,22 @@

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -348,7 +348,7 @@

 delete(name, x__xgafv=None)
-  Deletes a Discovery configuration.
+  Deletes a discovery configuration.
 
 Args:
   name: string, Required. Resource name of the project and the config, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
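
For illustration, a minimal sketch of calling this method with the google-api-python-client library, assuming Application Default Credentials are configured. The collection chain (projects().locations().discoveryConfigs()) and the config name are assumptions based on the resource name shown above; adjust them to the collection documented on this page.

from googleapiclient.discovery import build

# Build the DLP v2 client; by default this uses Application Default Credentials.
dlp = build("dlp", "v2")

# Placeholder config name, following the example shown above.
config_name = "projects/dlp-test-project/discoveryConfigs/53234423"

# Delete the discovery configuration; an empty response indicates success.
dlp.projects().locations().discoveryConfigs().delete(name=config_name).execute()
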
@@ -366,7 +366,7 @@ 

 get(name, x__xgafv=None)
-  Gets a Discovery configuration.
+  Gets a discovery configuration.
 
 Args:
   name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
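
A hedged sketch of retrieving a single DiscoveryConfig with the Python client. The collection chain and the config name are assumptions taken from the example resource name above, not a definitive call path.

from googleapiclient.discovery import build

dlp = build("dlp", "v2")

config_name = "projects/dlp-test-project/discoveryConfigs/53234423"  # placeholder

# The response is a DiscoveryConfig dict shaped like the schema documented here.
config = dlp.projects().locations().discoveryConfigs().get(name=config_name).execute()
print(config.get("name"), config.get("status"), config.get("lastRunTime"))
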
@@ -378,7 +378,7 @@ 

Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -424,13 +424,13 @@

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -439,7 +439,7 @@

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -454,22 +454,22 @@

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -493,13 +493,13 @@

 list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists Discovery configurations.
+  Lists discovery configurations.
 
 Args:
   parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
-  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
-  pageSize: integer, Size of the page, can be limited by a server.
-  pageToken: string, Page token to continue retrieval. Comes from previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
+  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
+  pageSize: integer, Size of the page. This value can be limited by a server.
+  pageToken: string, Page token to continue retrieval. Comes from the previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
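
A sketch of paging through discovery configurations using the documented orderBy, pageSize, and pageToken parameters. The parent value is a placeholder and the collection chain is an assumption; note that order_by is kept constant across page requests, as required.

from googleapiclient.discovery import build

dlp = build("dlp", "v2")

parent = "projects/example-project/locations/europe-west3"  # placeholder parent

configs = []
page_token = None
while True:
    response = dlp.projects().locations().discoveryConfigs().list(
        parent=parent,
        orderBy="last_run_time desc",  # one of the supported sort fields
        pageSize=50,                   # the server may return fewer results
        pageToken=page_token,
    ).execute()
    configs.extend(response.get("discoveryConfigs", []))
    page_token = response.get("nextPageToken")
    if not page_token:  # no more pages
        break
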
@@ -510,7 +510,7 @@ 

{ # Response message for ListDiscoveryConfigs. "discoveryConfigs": [ # List of configs, up to page_size in ListDiscoveryConfigsRequest. - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -556,13 +556,13 @@

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -571,7 +571,7 @@

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -586,22 +586,22 @@

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -622,7 +622,7 @@

"updateTime": "A String", # Output only. The last update timestamp of a DiscoveryConfig. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDiscoveryConfigs request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDiscoveryConfigs request. }
@@ -642,7 +642,7 @@

 patch(name, body=None, x__xgafv=None)
-  Updates a Discovery configuration.
+  Updates a discovery configuration.
 
 Args:
   name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
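
A sketch of updating a configuration through the Python client. Only the documented discoveryConfig request field is shown, the status value is illustrative, and the collection chain and config name are assumptions.

from googleapiclient.discovery import build

dlp = build("dlp", "v2")

config_name = "projects/dlp-test-project/discoveryConfigs/53234423"  # placeholder

# Request body for UpdateDiscoveryConfig; fields not documented here are omitted.
body = {
    "discoveryConfig": {
        "status": "PAUSED",  # illustrative status value
    },
}

updated = dlp.projects().locations().discoveryConfigs().patch(
    name=config_name,
    body=body,
).execute()
print(updated.get("updateTime"))
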
@@ -650,7 +650,7 @@ 

The object takes the form of: { # Request message for UpdateDiscoveryConfig. - "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # New DiscoveryConfig value. + "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. New DiscoveryConfig value. "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -696,13 +696,13 @@

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -711,7 +711,7 @@

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -726,22 +726,22 @@

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -772,7 +772,7 @@

Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -818,13 +818,13 @@

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -833,7 +833,7 @@

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -848,22 +848,22 @@

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
diff --git a/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html b/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html index 727d77ccac3..4ddb7a33d47 100644 --- a/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html +++ b/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html @@ -97,7 +97,7 @@

parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - `trigger_name` - The name of the trigger that created the job. - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * Supported fields for risk analysis jobs: - `state` - RUNNING|CANCELED|FINISHED|FAILED - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * The operator must be `=` or `!=`. Examples: * inspected_storage = cloud_storage AND state = done * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = done OR state = canceled) * end_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` pageSize: integer, The standard list page size. pageToken: string, The standard list page token. type: string, The type of job. Defaults to `DlpJobType.INSPECT` diff --git a/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html b/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html index 9f361ad4927..82d04a60458 100644 --- a/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html +++ b/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html @@ -721,9 +721,9 @@

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -920,7 +920,7 @@

"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request. }
diff --git a/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html b/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html index 8214312a598..7645673ecb6 100644 --- a/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html +++ b/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html @@ -1184,9 +1184,9 @@

parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect triggers: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. - pageSize: integer, Size of the page, can be limited by a server. - pageToken: string, Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. + orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. + pageSize: integer, Size of the page. This value can be limited by a server. + pageToken: string, Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. type: string, The type of jobs. Will use `DlpJobType.INSPECT` if not set. 
    Allowed values
      DLP_JOB_TYPE_UNSPECIFIED - Defaults to INSPECT_JOB.
@@ -1542,7 +1542,7 @@


"updateTime": "A String", # Output only. The last update timestamp of a triggeredJob. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListJobTriggers request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListJobTriggers request. }
diff --git a/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html b/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html
index 74e7856ea9c..6fed1d1ed58 100644
--- a/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html
+++ b/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html
@@ -466,9 +466,9 @@


Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@


  An object of the form:
    { # Response message for ListStoredInfoTypes.
-  "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request.
+  "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request.
   "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest.
     { # StoredInfoType resource message that contains information about the current version and any pending updates.
       "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type.
diff --git a/docs/dyn/dlp_v2.organizations.storedInfoTypes.html b/docs/dyn/dlp_v2.organizations.storedInfoTypes.html
index 68de211ec57..812c15bb642 100644
--- a/docs/dyn/dlp_v2.organizations.storedInfoTypes.html
+++ b/docs/dyn/dlp_v2.organizations.storedInfoTypes.html
@@ -466,9 +466,9 @@


Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@


  An object of the form:
    { # Response message for ListStoredInfoTypes.
-  "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request.
+  "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request.
   "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest.
     { # StoredInfoType resource message that contains information about the current version and any pending updates.
       "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type.
diff --git a/docs/dyn/dlp_v2.projects.deidentifyTemplates.html b/docs/dyn/dlp_v2.projects.deidentifyTemplates.html
index 9bdd468b23e..531c5db5d0e 100644
--- a/docs/dyn/dlp_v2.projects.deidentifyTemplates.html
+++ b/docs/dyn/dlp_v2.projects.deidentifyTemplates.html
@@ -2752,9 +2752,9 @@


Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3628,7 +3628,7 @@


"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDeidentifyTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDeidentifyTemplates request. }
diff --git a/docs/dyn/dlp_v2.projects.dlpJobs.html b/docs/dyn/dlp_v2.projects.dlpJobs.html
index 5c2d308515c..3a5b083a256 100644
--- a/docs/dyn/dlp_v2.projects.dlpJobs.html
+++ b/docs/dyn/dlp_v2.projects.dlpJobs.html
@@ -8042,7 +8042,7 @@


parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - `trigger_name` - The name of the trigger that created the job. - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * Supported fields for risk analysis jobs: - `state` - RUNNING|CANCELED|FINISHED|FAILED - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * The operator must be `=` or `!=`. Examples: * inspected_storage = cloud_storage AND state = done * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = done OR state = canceled) * end_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` pageSize: integer, The standard list page size. pageToken: string, The standard list page token. type: string, The type of job. Defaults to `DlpJobType.INSPECT` diff --git a/docs/dyn/dlp_v2.projects.inspectTemplates.html b/docs/dyn/dlp_v2.projects.inspectTemplates.html index 89e09db8d40..ce891429f8b 100644 --- a/docs/dyn/dlp_v2.projects.inspectTemplates.html +++ b/docs/dyn/dlp_v2.projects.inspectTemplates.html @@ -721,9 +721,9 @@


Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -920,7 +920,7 @@


"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request. }
diff --git a/docs/dyn/dlp_v2.projects.jobTriggers.html b/docs/dyn/dlp_v2.projects.jobTriggers.html
index a4e2660a83e..c2cc1bea7d9 100644
--- a/docs/dyn/dlp_v2.projects.jobTriggers.html
+++ b/docs/dyn/dlp_v2.projects.jobTriggers.html
@@ -4900,9 +4900,9 @@


parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect triggers: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. - pageSize: integer, Size of the page, can be limited by a server. - pageToken: string, Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. + orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. + pageSize: integer, Size of the page. This value can be limited by a server. + pageToken: string, Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. type: string, The type of jobs. Will use `DlpJobType.INSPECT` if not set. 
    Allowed values
      DLP_JOB_TYPE_UNSPECIFIED - Defaults to INSPECT_JOB.
@@ -5258,7 +5258,7 @@


"updateTime": "A String", # Output only. The last update timestamp of a triggeredJob. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListJobTriggers request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListJobTriggers request. }
diff --git a/docs/dyn/dlp_v2.projects.locations.deidentifyTemplates.html b/docs/dyn/dlp_v2.projects.locations.deidentifyTemplates.html
index cd22789cb83..d19490a6eb8 100644
--- a/docs/dyn/dlp_v2.projects.locations.deidentifyTemplates.html
+++ b/docs/dyn/dlp_v2.projects.locations.deidentifyTemplates.html
@@ -2752,9 +2752,9 @@


Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3628,7 +3628,7 @@


"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDeidentifyTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDeidentifyTemplates request. } diff --git a/docs/dyn/dlp_v2.projects.locations.discoveryConfigs.html b/docs/dyn/dlp_v2.projects.locations.discoveryConfigs.html index 9c6b669b070..dd97cb696f3 100644 --- a/docs/dyn/dlp_v2.projects.locations.discoveryConfigs.html +++ b/docs/dyn/dlp_v2.projects.locations.discoveryConfigs.html @@ -79,22 +79,22 @@

Instance Methods

Close httplib2 connections.

  create(parent, body=None, x__xgafv=None)
-  Creates a config for Discovery to scan and profile storage.
+  Creates a config for discovery to scan and profile storage.
  delete(name, x__xgafv=None)
-  Deletes a Discovery configuration.
+  Deletes a discovery configuration.
  get(name, x__xgafv=None)
-  Gets a Discovery configuration.
+  Gets a discovery configuration.
  list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists Discovery configurations.
+  Lists discovery configurations.
  list_next()
  Retrieves the next page of results.
  patch(name, body=None, x__xgafv=None)
-  Updates a Discovery configuration.
+  Updates a discovery configuration.


  close()
@@ -103,7 +103,7 @@


  create(parent, body=None, x__xgafv=None)
-  Creates a config for Discovery to scan and profile storage.
+  Creates a config for discovery to scan and profile storage.
 
 Args:
   parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
@@ -111,8 +111,8 @@ 


The object takes the form of: { # Request message for CreateDiscoveryConfig. - "configId": "A String", # The config id can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one. - "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create. + "configId": "A String", # The config ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one. + "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create. "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -158,13 +158,13 @@


], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -173,7 +173,7 @@


"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -188,22 +188,22 @@


], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -233,7 +233,7 @@


Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -279,13 +279,13 @@


], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -294,7 +294,7 @@


"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -309,22 +309,22 @@


], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -348,7 +348,7 @@


  delete(name, x__xgafv=None)
-  Deletes a Discovery configuration.
+  Deletes a discovery configuration.
 
 Args:
   name: string, Required. Resource name of the project and the config, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
@@ -366,7 +366,7 @@ 


  get(name, x__xgafv=None)
-  Gets a Discovery configuration.
+  Gets a discovery configuration.
 
 Args:
   name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
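The get and delete methods take the full resource name assigned at creation time; a short sketch with a placeholder name:

    # Sketch: fetch a discovery configuration, then delete it by resource name.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    name = "projects/example-project/locations/global/discoveryConfigs/53234423"

    configs = dlp.projects().locations().discoveryConfigs()
    config = configs.get(name=name).execute()
    print(config.get("status"), config.get("lastRunTime"))

    configs.delete(name=name).execute()  # an empty JSON object is expected on success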
@@ -378,7 +378,7 @@ 


Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -424,13 +424,13 @@


], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -439,7 +439,7 @@


"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -454,22 +454,22 @@


], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -493,13 +493,13 @@


  list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists Discovery configurations.
+  Lists discovery configurations.
 
 Args:
   parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
-  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
-  pageSize: integer, Size of the page, can be limited by a server.
-  pageToken: string, Page token to continue retrieval. Comes from previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
+  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
+  pageSize: integer, Size of the page. This value can be limited by a server.
+  pageToken: string, Page token to continue retrieval. Comes from the previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
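A sketch of the list method with the documented ordering, again with a placeholder parent:

    # Sketch: list discovery configurations, most recently run first.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    response = (
        dlp.projects()
        .locations()
        .discoveryConfigs()
        .list(
            parent="projects/example-project/locations/global",
            orderBy="last_run_time desc",
            pageSize=50,
        )
        .execute()
    )
    for config in response.get("discoveryConfigs", []):
        print(config["name"], config.get("status"))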
@@ -510,7 +510,7 @@ 


{ # Response message for ListDiscoveryConfigs. "discoveryConfigs": [ # List of configs, up to page_size in ListDiscoveryConfigsRequest. - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -556,13 +556,13 @@


], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -571,7 +571,7 @@

Method Details

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -586,22 +586,22 @@

Method Details

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -622,7 +622,7 @@

Method Details

"updateTime": "A String", # Output only. The last update timestamp of a DiscoveryConfig. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDiscoveryConfigs request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDiscoveryConfigs request. }
@@ -642,7 +642,7 @@

Method Details

patch(name, body=None, x__xgafv=None)
-Updates a Discovery configuration.
+Updates a discovery configuration.
 
 Args:
   name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
@@ -650,7 +650,7 @@ 

Method Details

The object takes the form of: { # Request message for UpdateDiscoveryConfig. - "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # New DiscoveryConfig value. + "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. New DiscoveryConfig value. "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -696,13 +696,13 @@

Method Details

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -711,7 +711,7 @@

Method Details

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -726,22 +726,22 @@

Method Details

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -772,7 +772,7 @@

Method Details

Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -818,13 +818,13 @@

Method Details

], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -833,7 +833,7 @@

Method Details

"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -848,22 +848,22 @@

Method Details

], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
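A hedged sketch of the patch call documented above. Only the discoveryConfig wrapper and the status field come from this page; the "PAUSED" value and the updateMask request field are assumptions used to illustrate a partial update and should be verified against the full reference.

# Sketch only: pause an existing discovery configuration via patch().
from googleapiclient.discovery import build

dlp = build("dlp", "v2")
name = "projects/dlp-test-project/locations/global/discoveryConfigs/53234423"

body = {
    "discoveryConfig": {"status": "PAUSED"},  # assumed enum value, not listed on this page
    "updateMask": "status",                   # assumed request field; verify before use
}
updated = dlp.projects().locations().discoveryConfigs().patch(name=name, body=body).execute()
print(updated["name"], updated.get("updateTime"))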
diff --git a/docs/dyn/dlp_v2.projects.locations.dlpJobs.html b/docs/dyn/dlp_v2.projects.locations.dlpJobs.html
index 4c87170a147..f506b2253fe 100644
--- a/docs/dyn/dlp_v2.projects.locations.dlpJobs.html
+++ b/docs/dyn/dlp_v2.projects.locations.dlpJobs.html
@@ -8160,7 +8160,7 @@

Method Details

parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - `trigger_name` - The name of the trigger that created the job. - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * Supported fields for risk analysis jobs: - `state` - RUNNING|CANCELED|FINISHED|FAILED - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * The operator must be `=` or `!=`. Examples: * inspected_storage = cloud_storage AND state = done * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = done OR state = canceled) * end_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` pageSize: integer, The standard list page size. pageToken: string, The standard list page token. type: string, The type of job. Defaults to `DlpJobType.INSPECT` diff --git a/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html b/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html index 62319e3ac1a..a0c7a714da1 100644 --- a/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html +++ b/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html @@ -721,9 +721,9 @@

Method Details

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -920,7 +920,7 @@

Method Details

"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request. }
diff --git a/docs/dyn/dlp_v2.projects.locations.jobTriggers.html b/docs/dyn/dlp_v2.projects.locations.jobTriggers.html
index 2d186e86fd0..2f0c5b312c0 100644
--- a/docs/dyn/dlp_v2.projects.locations.jobTriggers.html
+++ b/docs/dyn/dlp_v2.projects.locations.jobTriggers.html
@@ -4991,9 +4991,9 @@

Method Details

parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect triggers: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. - pageSize: integer, Size of the page, can be limited by a server. - pageToken: string, Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. + orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. + pageSize: integer, Size of the page. This value can be limited by a server. + pageToken: string, Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. type: string, The type of jobs. Will use `DlpJobType.INSPECT` if not set. 
Allowed values DLP_JOB_TYPE_UNSPECIFIED - Defaults to INSPECT_JOB. @@ -5349,7 +5349,7 @@

Method Details

"updateTime": "A String", # Output only. The last update timestamp of a triggeredJob. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListJobTriggers request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListJobTriggers request. }
diff --git a/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html b/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html
index c16418792f2..2de077729da 100644
--- a/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html
+++ b/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html
@@ -466,9 +466,9 @@

Method Details

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@

Method Details

An object of the form: { # Response message for ListStoredInfoTypes. - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request. "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest. { # StoredInfoType resource message that contains information about the current version and any pending updates. "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type. diff --git a/docs/dyn/dlp_v2.projects.storedInfoTypes.html b/docs/dyn/dlp_v2.projects.storedInfoTypes.html index 82292875257..5917181d395 100644 --- a/docs/dyn/dlp_v2.projects.storedInfoTypes.html +++ b/docs/dyn/dlp_v2.projects.storedInfoTypes.html @@ -466,9 +466,9 @@

Method Details

Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@

Method Details

An object of the form: { # Response message for ListStoredInfoTypes. - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request. "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest. { # StoredInfoType resource message that contains information about the current version and any pending updates. "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type. diff --git a/docs/dyn/gmail_v1.users.settings.cse.keypairs.html b/docs/dyn/gmail_v1.users.settings.cse.keypairs.html index b985a208d61..696adfcb3cc 100644 --- a/docs/dyn/gmail_v1.users.settings.cse.keypairs.html +++ b/docs/dyn/gmail_v1.users.settings.cse.keypairs.html @@ -121,6 +121,9 @@

Method Details

"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -149,6 +152,9 @@

Method Details

"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -191,6 +197,9 @@

Method Details

"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -233,6 +242,9 @@

Method Details

"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -269,6 +281,9 @@

Method Details

"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -308,6 +323,9 @@

Method Details

"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. diff --git a/docs/dyn/identitytoolkit_v1.accounts.html b/docs/dyn/identitytoolkit_v1.accounts.html index 81e0a0edd87..f5662219655 100644 --- a/docs/dyn/identitytoolkit_v1.accounts.html +++ b/docs/dyn/identitytoolkit_v1.accounts.html @@ -177,7 +177,7 @@

Method Details

"providerId": "A String", # The provider ID from the request, if provided. "registered": True or False, # Whether the email identifier represents an existing account. Present only when an email identifier is set in the request. "sessionId": "A String", # The session ID from the request, or a random string generated by CreateAuthUri if absent. It is used to prevent session fixation attacks. - "signinMethods": [ # The list of sign-in methods that the user has previously used. Each element is one of `password`, `emailLink`, or the provider ID of an IdP. Present only when a registered email identifier is set in the request. + "signinMethods": [ # The list of sign-in methods that the user has previously used. Each element is one of `password`, `emailLink`, or the provider ID of an IdP. Present only when a registered email identifier is set in the request. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, this method returns an empty list. "A String", ], }
@@ -867,7 +867,7 @@

Method Details

], "disableUser": True or False, # If true, marks the account as disabled, meaning the user will no longer be able to sign-in. "displayName": "A String", # The user's new display name to be updated in the account's attributes. The length of the display name must be less than or equal to 256 characters. - "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. + "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator. "emailVerified": True or False, # Whether the user's email has been verified. Specifying this field requires a Google OAuth 2.0 credential with proper [permissions] (https://cloud.google.com/identity-platform/docs/access-control). "idToken": "A String", # A valid Identity Platform ID token. Required when attempting to change user-related information. "instanceId": "A String", diff --git a/docs/dyn/identitytoolkit_v1.projects.accounts.html b/docs/dyn/identitytoolkit_v1.projects.accounts.html index 4b390a3434a..857a2840f74 100644 --- a/docs/dyn/identitytoolkit_v1.projects.accounts.html +++ b/docs/dyn/identitytoolkit_v1.projects.accounts.html @@ -644,7 +644,7 @@

Method Details

], "disableUser": True or False, # If true, marks the account as disabled, meaning the user will no longer be able to sign-in. "displayName": "A String", # The user's new display name to be updated in the account's attributes. The length of the display name must be less than or equal to 256 characters. - "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. + "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator. "emailVerified": True or False, # Whether the user's email has been verified. Specifying this field requires a Google OAuth 2.0 credential with proper [permissions] (https://cloud.google.com/identity-platform/docs/access-control). "idToken": "A String", # A valid Identity Platform ID token. Required when attempting to change user-related information. "instanceId": "A String", diff --git a/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html b/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html index 2443137ed7b..d9cba83d2bf 100644 --- a/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html +++ b/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html @@ -651,7 +651,7 @@

Method Details

], "disableUser": True or False, # If true, marks the account as disabled, meaning the user will no longer be able to sign-in. "displayName": "A String", # The user's new display name to be updated in the account's attributes. The length of the display name must be less than or equal to 256 characters. - "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. + "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator. "emailVerified": True or False, # Whether the user's email has been verified. Specifying this field requires a Google OAuth 2.0 credential with proper [permissions] (https://cloud.google.com/identity-platform/docs/access-control). "idToken": "A String", # A valid Identity Platform ID token. Required when attempting to change user-related information. "instanceId": "A String", diff --git a/docs/dyn/identitytoolkit_v2.projects.html b/docs/dyn/identitytoolkit_v2.projects.html index 64f4c72c396..6b4750ea3df 100644 --- a/docs/dyn/identitytoolkit_v2.projects.html +++ b/docs/dyn/identitytoolkit_v2.projects.html @@ -105,15 +105,9 @@

Instance Methods

  getConfig(name, x__xgafv=None)
Retrieve an Identity Toolkit project configuration.
-  getPasskeyConfig(name, x__xgafv=None)
-Retrieve a passkey configuration for an Identity Toolkit project.
  updateConfig(name, body=None, updateMask=None, x__xgafv=None)
Update an Identity Toolkit project configuration.
-  updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
-Update a passkey configuration for an Identity Toolkit project.
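With the passkey methods removed from this surface, the remaining project-level calls are getConfig and updateConfig. A minimal sketch of the read path follows; the projects/{project}/config name format is an assumption based on the resource naming used elsewhere on this page.

# Sketch only: read the Identity Toolkit project configuration.
from googleapiclient.discovery import build

idt = build("identitytoolkit", "v2")

config = idt.projects().getConfig(name="projects/my-awesome-project/config").execute()
print(config.get("name"))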

Method Details

close() @@ -340,29 +334,6 @@

Method Details

}
-
- getPasskeyConfig(name, x__xgafv=None)
-Retrieve a passkey configuration for an Identity Toolkit project.
-
-Args:
-  name: string, Required. The resource name of the config, for example: 'projects/my-awesome-project/passkeyConfig'. (required)
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Configuration for signing in users using passkeys.
-  "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.
-    "A String",
-  ],
-  "name": "A String", # Required. The name of the PasskeyConfig resource.
-  "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.
-}
-
-
updateConfig(name, body=None, updateMask=None, x__xgafv=None)
Update an Identity Toolkit project configuration.
@@ -792,39 +763,4 @@ 

Method Details

}
-
- updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
-Update a passkey configuration for an Identity Toolkit project.
-
-Args:
-  name: string, Required. The name of the PasskeyConfig resource. (required)
-  body: object, The request body.
-    The object takes the form of:
-
-{ # Configuration for signing in users using passkeys.
-  "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.
-    "A String",
-  ],
-  "name": "A String", # Required. The name of the PasskeyConfig resource.
-  "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.
-}
-
-  updateMask: string, Optional. The update mask applies to the resource. Empty update mask will result in updating nothing. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Configuration for signing in users using passkeys.
-  "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.
-    "A String",
-  ],
-  "name": "A String", # Required. The name of the PasskeyConfig resource.
-  "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.
-}
-
-
\ No newline at end of file
diff --git a/docs/dyn/identitytoolkit_v2.projects.tenants.html b/docs/dyn/identitytoolkit_v2.projects.tenants.html
index 76a55f03ac7..a41d0f4a47b 100644
--- a/docs/dyn/identitytoolkit_v2.projects.tenants.html
+++ b/docs/dyn/identitytoolkit_v2.projects.tenants.html
@@ -104,9 +104,6 @@

Instance Methods

getIamPolicy(resource, body=None, x__xgafv=None)

Gets the access control policy for a resource. An error is returned if the resource does not exist. An empty policy is returned if the resource exists but does not have a policy set on it. Caller must have the right Google IAM permission on the resource.

-

- getPasskeyConfig(name, x__xgafv=None)

-

Retrieve a passkey configuration for an Identity Toolkit project.

list(parent, pageSize=None, pageToken=None, x__xgafv=None)

List tenants under the given agent project. Requires read permission on the Agent project.

@@ -122,9 +119,6 @@

Instance Methods

testIamPermissions(resource, body=None, x__xgafv=None)

Returns the caller's permissions on a resource. An error is returned if the resource does not exist. A caller is not required to have Google IAM permission to make this request.

-

- updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)

-

Update a passkey configuration for an Identity Toolkit project.

Method Details

close()
@@ -529,29 +523,6 @@

Method Details

}
-
- getPasskeyConfig(name, x__xgafv=None)
-
Retrieve a passkey configuration for an Identity Toolkit project.
-
-Args:
-  name: string, Required. The resource name of the config, for example: 'projects/my-awesome-project/passkeyConfig'. (required)
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Configuration for signing in users using passkeys.
-  "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.
-    "A String",
-  ],
-  "name": "A String", # Required. The name of the PasskeyConfig resource.
-  "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.
-}
-
-
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List tenants under the given agent project. Requires read permission on the Agent project.
@@ -1013,39 +984,4 @@ 

Method Details

}
-
- updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
-
Update a passkey configuration for an Identity Toolkit project.
-
-Args:
-  name: string, Required. The name of the PasskeyConfig resource. (required)
-  body: object, The request body.
-    The object takes the form of:
-
-{ # Configuration for signing in users using passkeys.
-  "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.
-    "A String",
-  ],
-  "name": "A String", # Required. The name of the PasskeyConfig resource.
-  "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.
-}
-
-  updateMask: string, Optional. The update mask applies to the resource. Empty update mask will result in updating nothing. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Configuration for signing in users using passkeys.
-  "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.
-    "A String",
-  ],
-  "name": "A String", # Required. The name of the PasskeyConfig resource.
-  "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.
-}
-
-
\ No newline at end of file
diff --git a/docs/dyn/index.md b/docs/dyn/index.md
index ad4af29e676..623e171ca30 100644
--- a/docs/dyn/index.md
+++ b/docs/dyn/index.md
@@ -162,6 +162,10 @@
 * [v1alpha](http://googleapis.github.io/google-api-python-client/docs/dyn/beyondcorp_v1alpha.html)
 
+## biglake
+* [v1](http://googleapis.github.io/google-api-python-client/docs/dyn/biglake_v1.html)
+
+
 ## bigquery
 * [v2](http://googleapis.github.io/google-api-python-client/docs/dyn/bigquery_v2.html)
 
diff --git a/docs/dyn/metastore_v1.projects.locations.services.backups.html b/docs/dyn/metastore_v1.projects.locations.services.backups.html
index 2befafd556a..b67a27455b8 100644
--- a/docs/dyn/metastore_v1.projects.locations.services.backups.html
+++ b/docs/dyn/metastore_v1.projects.locations.services.backups.html
@@ -167,6 +167,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. @@ -352,6 +357,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. @@ -525,6 +535,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. diff --git a/docs/dyn/metastore_v1.projects.locations.services.html b/docs/dyn/metastore_v1.projects.locations.services.html index 02c1521a2e7..ede71e5365d 100644 --- a/docs/dyn/metastore_v1.projects.locations.services.html +++ b/docs/dyn/metastore_v1.projects.locations.services.html @@ -233,6 +233,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. @@ -452,6 +457,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. @@ -616,6 +626,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. @@ -783,6 +798,11 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + }, + }, "metadataManagementActivity": { # The metadata management activities of the metastore service. # Output only. The metadata management activities of the metastore service. "metadataExports": [ # Output only. The latest metadata exports of the metastore service. { # The details of a metadata export operation. diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html index 7586c780d8b..4be301819fb 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html @@ -171,9 +171,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -370,9 +370,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -557,9 +557,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.html b/docs/dyn/metastore_v1alpha.projects.locations.services.html index 2a32ef0ccdf..13dd8fcc2b7 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.html @@ -242,9 +242,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -475,9 +475,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -653,9 +653,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -834,9 +834,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html index c11481528b9..2d9756cb133 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html @@ -171,9 +171,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -370,9 +370,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -557,9 +557,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.html b/docs/dyn/metastore_v1beta.projects.locations.services.html index 6e915030a8c..24b673b01ad 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.html @@ -242,9 +242,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -475,9 +475,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -653,9 +653,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. @@ -834,9 +834,9 @@

Method Details

"dayOfWeek": "A String", # The day of week, when the window starts. "hourOfDay": 42, # The hour of day (0-23) when the window starts. }, - "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # The setting that defines how metastore metadata should be integrated with external services and systems. - "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # The integration config for the Data Catalog service. - "enabled": True or False, # Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. + "metadataIntegration": { # Specifies how metastore metadata should be integrated with external services. # Optional. The setting that defines how metastore metadata should be integrated with external services and systems. + "dataCatalogConfig": { # Specifies how metastore metadata should be integrated with the Data Catalog service. # Optional. The integration config for the Data Catalog service. + "enabled": True or False, # Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog. }, "dataplexConfig": { # Specifies how metastore metadata should be integrated with the Dataplex service. # The integration config for the Dataplex service. "lakeResources": { # A reference to the Lake resources that this metastore service is attached to. The key is the lake resource name. Example: projects/{project_number}/locations/{location_id}/lakes/{lake_id}. diff --git a/docs/dyn/monitoring_v3.projects.alertPolicies.html b/docs/dyn/monitoring_v3.projects.alertPolicies.html index 4f61d096329..2f384267f5c 100644 --- a/docs/dyn/monitoring_v3.projects.alertPolicies.html +++ b/docs/dyn/monitoring_v3.projects.alertPolicies.html @@ -198,7 +198,7 @@

Method Details

"evaluationMissingData": "A String", # A condition control that determines how metric-threshold conditions are evaluated when data stops arriving. "filter": "A String", # Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. "forecastOptions": { # Options used when forecasting the time series and testing the predicted value against the threshold. # When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold. - "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. + "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. }, "thresholdValue": 3.14, # A value against which to compare the time series. "trigger": { # Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a {count: 1} trigger is used. # The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. @@ -339,7 +339,7 @@

Method Details

"evaluationMissingData": "A String", # A condition control that determines how metric-threshold conditions are evaluated when data stops arriving. "filter": "A String", # Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. "forecastOptions": { # Options used when forecasting the time series and testing the predicted value against the threshold. # When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold. - "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. + "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. }, "thresholdValue": 3.14, # A value against which to compare the time series. "trigger": { # Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a {count: 1} trigger is used. # The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. @@ -505,7 +505,7 @@

Method Details

"evaluationMissingData": "A String", # A condition control that determines how metric-threshold conditions are evaluated when data stops arriving. "filter": "A String", # Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. "forecastOptions": { # Options used when forecasting the time series and testing the predicted value against the threshold. # When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold. - "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. + "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. }, "thresholdValue": 3.14, # A value against which to compare the time series. "trigger": { # Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a {count: 1} trigger is used. # The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. @@ -659,7 +659,7 @@

Method Details

"evaluationMissingData": "A String", # A condition control that determines how metric-threshold conditions are evaluated when data stops arriving. "filter": "A String", # Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. "forecastOptions": { # Options used when forecasting the time series and testing the predicted value against the threshold. # When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold. - "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. + "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. }, "thresholdValue": 3.14, # A value against which to compare the time series. "trigger": { # Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a {count: 1} trigger is used. # The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. @@ -820,7 +820,7 @@

Method Details

"evaluationMissingData": "A String", # A condition control that determines how metric-threshold conditions are evaluated when data stops arriving. "filter": "A String", # Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. "forecastOptions": { # Options used when forecasting the time series and testing the predicted value against the threshold. # When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold. - "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. + "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. }, "thresholdValue": 3.14, # A value against which to compare the time series. "trigger": { # Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a {count: 1} trigger is used. # The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. @@ -962,7 +962,7 @@

Method Details

"evaluationMissingData": "A String", # A condition control that determines how metric-threshold conditions are evaluated when data stops arriving. "filter": "A String", # Required. A filter (https://cloud.google.com/monitoring/api/v3/filters) that identifies which time series should be compared with the threshold.The filter is similar to the one that is specified in the ListTimeSeries request (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that call is useful to verify the time series that will be retrieved / processed). The filter must specify the metric type and the resource type. Optionally, it can specify resource labels and metric labels. This field must not exceed 2048 Unicode characters in length. "forecastOptions": { # Options used when forecasting the time series and testing the predicted value against the threshold. # When this field is present, the MetricThreshold condition forecasts whether the time series is predicted to violate the threshold within the forecast_horizon. When this field is not set, the MetricThreshold tests the current value of the timeseries against the threshold. - "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. + "forecastHorizon": "A String", # Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours. }, "thresholdValue": 3.14, # A value against which to compare the time series. "trigger": { # Specifies how many time series must fail a predicate to trigger a condition. If not specified, then a {count: 1} trigger is used. # The number/percent of time series for which the comparison must hold in order for the condition to trigger. If unspecified, then the condition will trigger if the comparison is true for any of the time series that have been identified by filter and aggregations, or by the ratio, if denominator_filter and denominator_aggregations are specified. diff --git a/docs/dyn/monitoring_v3.projects.timeSeries.html b/docs/dyn/monitoring_v3.projects.timeSeries.html index 9633592f057..4c4c56b296e 100644 --- a/docs/dyn/monitoring_v3.projects.timeSeries.html +++ b/docs/dyn/monitoring_v3.projects.timeSeries.html @@ -79,7 +79,7 @@

Instance Methods

Close httplib2 connections.

create(name, body=None, x__xgafv=None)

-

Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.

+

Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. This method does not support resource locations constraint of an organization policy (https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).

createService(name, body=None, x__xgafv=None)

Creates or adds data to one or more service time series. A service time series is a time series for a metric from a Google Cloud service. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. This endpoint rejects writes to user-defined metrics. This method is only for use by Google Cloud services. Use projects.timeSeries.create instead.

@@ -103,7 +103,7 @@

Method Details

create(name, body=None, x__xgafv=None)
-
Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.
+  
Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. This method does not support resource locations constraint of an organization policy (https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
 
 Args:
   name: string, Required. The project (https://cloud.google.com/monitoring/api/v3#project_name) on which to execute the request. The format is: projects/[PROJECT_ID_OR_NUMBER]  (required)
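A minimal sketch of writing a data point with projects.timeSeries.create as documented above. The custom metric type, labels, and value are placeholder assumptions.

```python
import time

from googleapiclient.discovery import build

# Write a single point for a user-defined metric against the "global" monitored resource.
monitoring = build("monitoring", "v3")

body = {
    "timeSeries": [
        {
            "metric": {
                "type": "custom.googleapis.com/my_app/queue_depth",
                "labels": {"queue": "default"},
            },
            "resource": {
                "type": "global",
                "labels": {"project_id": "my-project"},
            },
            "points": [
                {
                    "interval": {
                        "endTime": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
                    },
                    "value": {"int64Value": "42"},
                }
            ],
        }
    ],
}

monitoring.projects().timeSeries().create(name="projects/my-project", body=body).execute()
```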
diff --git a/docs/dyn/mybusinesslodging_v1.locations.html b/docs/dyn/mybusinesslodging_v1.locations.html
index 138b633b1c5..96d36ae0bb1 100644
--- a/docs/dyn/mybusinesslodging_v1.locations.html
+++ b/docs/dyn/mybusinesslodging_v1.locations.html
@@ -1121,23 +1121,23 @@ 

Method Details

"energyEfficientLightingException": "A String", # Energy efficient lighting exception. "energySavingThermostats": True or False, # Energy saving thermostats. The property installed energy-saving thermostats throughout the building to conserve energy when rooms or areas are not in use. Energy-saving thermostats are devices that control heating/cooling in the building by learning temperature preferences and automatically adjusting to energy-saving temperatures as the default. The thermostats are automatically set to a temperature between 68-78 degrees F (20-26 °C), depending on seasonality. In the winter, set the thermostat to 68°F (20°C) when the room is occupied, lowering room temperature when unoccupied. In the summer, set the thermostat to 78°F (26°C) when the room is occupied. "energySavingThermostatsException": "A String", # Energy saving thermostats exception. - "greenBuildingDesign": True or False, # Output only. Green building design. True if BREEAM-* or LEED-* certified. + "greenBuildingDesign": True or False, # Output only. Green building design. True if the property has been awarded a relevant certification. "greenBuildingDesignException": "A String", # Output only. Green building design exception. "independentOrganizationAuditsEnergyUse": True or False, # Independent organization audits energy use. The property conducts an energy audit at least every 5 years, the results of which are either verified by a third-party and/or published in external communications. An energy audit is a detailed assessment of the facility which provides recommendations to existing operations and procedures to improve energy efficiency, available incentives or rebates,and opportunities for improvements through renovations or upgrades. Examples of organizations that conduct credible third party audits include: Engie Impact, DNV GL (EU), Dexma, and local utility providers (they often provide energy and water audits). "independentOrganizationAuditsEnergyUseException": "A String", # Independent organization audits energy use exception. }, - "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. # Sustainability certifications the hotel has been awarded. + "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. # Sustainability certifications the hotel has been awarded. Deprecated: this field is no longer populated. All certification data is now provided by BeCause. "breeamCertification": "A String", # BREEAM certification. "breeamCertificationException": "A String", # BREEAM certification exception. "ecoCertifications": [ # The eco certificates awarded to the hotel. - { # An eco certificate awarded to the hotel. + { # An eco certificate awarded to the hotel. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. "awarded": True or False, # Whether the eco certificate was awarded or not. "awardedException": "A String", # Awarded exception. "ecoCertificate": "A String", # Required. The eco certificate. }, ], - "leedCertification": "A String", # LEED certification. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. - "leedCertificationException": "A String", # LEED certification exception. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. 
+ "leedCertification": "A String", # LEED certification. + "leedCertificationException": "A String", # LEED certification exception. }, "sustainableSourcing": { # Sustainable sourcing practices implemented at the hotel. # Sustainable sourcing practices implemented at the hotel. "ecoFriendlyToiletries": True or False, # Eco friendly toiletries. Soap, shampoo, lotion, and other toiletries provided for guests have a nationally or internationally recognized sustainability certification, such as USDA Organic, EU Organic, or cruelty-free. @@ -2270,23 +2270,23 @@

Method Details

"energyEfficientLightingException": "A String", # Energy efficient lighting exception. "energySavingThermostats": True or False, # Energy saving thermostats. The property installed energy-saving thermostats throughout the building to conserve energy when rooms or areas are not in use. Energy-saving thermostats are devices that control heating/cooling in the building by learning temperature preferences and automatically adjusting to energy-saving temperatures as the default. The thermostats are automatically set to a temperature between 68-78 degrees F (20-26 °C), depending on seasonality. In the winter, set the thermostat to 68°F (20°C) when the room is occupied, lowering room temperature when unoccupied. In the summer, set the thermostat to 78°F (26°C) when the room is occupied. "energySavingThermostatsException": "A String", # Energy saving thermostats exception. - "greenBuildingDesign": True or False, # Output only. Green building design. True if BREEAM-* or LEED-* certified. + "greenBuildingDesign": True or False, # Output only. Green building design. True if the property has been awarded a relevant certification. "greenBuildingDesignException": "A String", # Output only. Green building design exception. "independentOrganizationAuditsEnergyUse": True or False, # Independent organization audits energy use. The property conducts an energy audit at least every 5 years, the results of which are either verified by a third-party and/or published in external communications. An energy audit is a detailed assessment of the facility which provides recommendations to existing operations and procedures to improve energy efficiency, available incentives or rebates,and opportunities for improvements through renovations or upgrades. Examples of organizations that conduct credible third party audits include: Engie Impact, DNV GL (EU), Dexma, and local utility providers (they often provide energy and water audits). "independentOrganizationAuditsEnergyUseException": "A String", # Independent organization audits energy use exception. }, - "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. # Sustainability certifications the hotel has been awarded. + "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. # Sustainability certifications the hotel has been awarded. Deprecated: this field is no longer populated. All certification data is now provided by BeCause. "breeamCertification": "A String", # BREEAM certification. "breeamCertificationException": "A String", # BREEAM certification exception. "ecoCertifications": [ # The eco certificates awarded to the hotel. - { # An eco certificate awarded to the hotel. + { # An eco certificate awarded to the hotel. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. "awarded": True or False, # Whether the eco certificate was awarded or not. "awardedException": "A String", # Awarded exception. "ecoCertificate": "A String", # Required. The eco certificate. }, ], - "leedCertification": "A String", # LEED certification. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. - "leedCertificationException": "A String", # LEED certification exception. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. 
+ "leedCertification": "A String", # LEED certification. + "leedCertificationException": "A String", # LEED certification exception. }, "sustainableSourcing": { # Sustainable sourcing practices implemented at the hotel. # Sustainable sourcing practices implemented at the hotel. "ecoFriendlyToiletries": True or False, # Eco friendly toiletries. Soap, shampoo, lotion, and other toiletries provided for guests have a nationally or internationally recognized sustainability certification, such as USDA Organic, EU Organic, or cruelty-free. @@ -3418,23 +3418,23 @@

Method Details

"energyEfficientLightingException": "A String", # Energy efficient lighting exception. "energySavingThermostats": True or False, # Energy saving thermostats. The property installed energy-saving thermostats throughout the building to conserve energy when rooms or areas are not in use. Energy-saving thermostats are devices that control heating/cooling in the building by learning temperature preferences and automatically adjusting to energy-saving temperatures as the default. The thermostats are automatically set to a temperature between 68-78 degrees F (20-26 °C), depending on seasonality. In the winter, set the thermostat to 68°F (20°C) when the room is occupied, lowering room temperature when unoccupied. In the summer, set the thermostat to 78°F (26°C) when the room is occupied. "energySavingThermostatsException": "A String", # Energy saving thermostats exception. - "greenBuildingDesign": True or False, # Output only. Green building design. True if BREEAM-* or LEED-* certified. + "greenBuildingDesign": True or False, # Output only. Green building design. True if the property has been awarded a relevant certification. "greenBuildingDesignException": "A String", # Output only. Green building design exception. "independentOrganizationAuditsEnergyUse": True or False, # Independent organization audits energy use. The property conducts an energy audit at least every 5 years, the results of which are either verified by a third-party and/or published in external communications. An energy audit is a detailed assessment of the facility which provides recommendations to existing operations and procedures to improve energy efficiency, available incentives or rebates,and opportunities for improvements through renovations or upgrades. Examples of organizations that conduct credible third party audits include: Engie Impact, DNV GL (EU), Dexma, and local utility providers (they often provide energy and water audits). "independentOrganizationAuditsEnergyUseException": "A String", # Independent organization audits energy use exception. }, - "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. # Sustainability certifications the hotel has been awarded. + "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. # Sustainability certifications the hotel has been awarded. Deprecated: this field is no longer populated. All certification data is now provided by BeCause. "breeamCertification": "A String", # BREEAM certification. "breeamCertificationException": "A String", # BREEAM certification exception. "ecoCertifications": [ # The eco certificates awarded to the hotel. - { # An eco certificate awarded to the hotel. + { # An eco certificate awarded to the hotel. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. "awarded": True or False, # Whether the eco certificate was awarded or not. "awardedException": "A String", # Awarded exception. "ecoCertificate": "A String", # Required. The eco certificate. }, ], - "leedCertification": "A String", # LEED certification. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. - "leedCertificationException": "A String", # LEED certification exception. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. 
+ "leedCertification": "A String", # LEED certification. + "leedCertificationException": "A String", # LEED certification exception. }, "sustainableSourcing": { # Sustainable sourcing practices implemented at the hotel. # Sustainable sourcing practices implemented at the hotel. "ecoFriendlyToiletries": True or False, # Eco friendly toiletries. Soap, shampoo, lotion, and other toiletries provided for guests have a nationally or internationally recognized sustainability certification, such as USDA Organic, EU Organic, or cruelty-free. diff --git a/docs/dyn/mybusinesslodging_v1.locations.lodging.html b/docs/dyn/mybusinesslodging_v1.locations.lodging.html index d47e1edbb51..938c726731e 100644 --- a/docs/dyn/mybusinesslodging_v1.locations.lodging.html +++ b/docs/dyn/mybusinesslodging_v1.locations.lodging.html @@ -1115,23 +1115,23 @@

Method Details

"energyEfficientLightingException": "A String", # Energy efficient lighting exception. "energySavingThermostats": True or False, # Energy saving thermostats. The property installed energy-saving thermostats throughout the building to conserve energy when rooms or areas are not in use. Energy-saving thermostats are devices that control heating/cooling in the building by learning temperature preferences and automatically adjusting to energy-saving temperatures as the default. The thermostats are automatically set to a temperature between 68-78 degrees F (20-26 °C), depending on seasonality. In the winter, set the thermostat to 68°F (20°C) when the room is occupied, lowering room temperature when unoccupied. In the summer, set the thermostat to 78°F (26°C) when the room is occupied. "energySavingThermostatsException": "A String", # Energy saving thermostats exception. - "greenBuildingDesign": True or False, # Output only. Green building design. True if BREEAM-* or LEED-* certified. + "greenBuildingDesign": True or False, # Output only. Green building design. True if the property has been awarded a relevant certification. "greenBuildingDesignException": "A String", # Output only. Green building design exception. "independentOrganizationAuditsEnergyUse": True or False, # Independent organization audits energy use. The property conducts an energy audit at least every 5 years, the results of which are either verified by a third-party and/or published in external communications. An energy audit is a detailed assessment of the facility which provides recommendations to existing operations and procedures to improve energy efficiency, available incentives or rebates,and opportunities for improvements through renovations or upgrades. Examples of organizations that conduct credible third party audits include: Engie Impact, DNV GL (EU), Dexma, and local utility providers (they often provide energy and water audits). "independentOrganizationAuditsEnergyUseException": "A String", # Independent organization audits energy use exception. }, - "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. # Sustainability certifications the hotel has been awarded. + "sustainabilityCertifications": { # Sustainability certifications the hotel has been awarded. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. # Sustainability certifications the hotel has been awarded. Deprecated: this field is no longer populated. All certification data is now provided by BeCause. "breeamCertification": "A String", # BREEAM certification. "breeamCertificationException": "A String", # BREEAM certification exception. "ecoCertifications": [ # The eco certificates awarded to the hotel. - { # An eco certificate awarded to the hotel. + { # An eco certificate awarded to the hotel. Deprecated: this message is no longer populated. All certification data is now provided by BeCause. "awarded": True or False, # Whether the eco certificate was awarded or not. "awardedException": "A String", # Awarded exception. "ecoCertificate": "A String", # Required. The eco certificate. }, ], - "leedCertification": "A String", # LEED certification. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. - "leedCertificationException": "A String", # LEED certification exception. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC. 
+ "leedCertification": "A String", # LEED certification. + "leedCertificationException": "A String", # LEED certification exception. }, "sustainableSourcing": { # Sustainable sourcing practices implemented at the hotel. # Sustainable sourcing practices implemented at the hotel. "ecoFriendlyToiletries": True or False, # Eco friendly toiletries. Soap, shampoo, lotion, and other toiletries provided for guests have a nationally or internationally recognized sustainability certification, such as USDA Organic, EU Organic, or cruelty-free. diff --git a/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html b/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html index 7a26713a561..a62286d4064 100644 --- a/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html +++ b/docs/dyn/networkservices_v1beta1.projects.locations.lbRouteExtensions.html @@ -136,13 +136,13 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbRouteExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). "a_key": "A String", }, - "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. + "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). "name": "A String", # Required. Name of the `LbRouteExtension` resource in the following format: `projects/{project}/locations/{location}/lbRouteExtensions/{lb_route_extension}`. "updateTime": "A String", # Output only. The timestamp when the resource was updated. } @@ -254,13 +254,13 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbRouteExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). "a_key": "A String", }, - "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. + "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). "name": "A String", # Required. Name of the `LbRouteExtension` resource in the following format: `projects/{project}/locations/{location}/lbRouteExtensions/{lb_route_extension}`. "updateTime": "A String", # Output only. The timestamp when the resource was updated. }
@@ -312,13 +312,13 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbRouteExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). "a_key": "A String", }, - "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. + "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). "name": "A String", # Required. Name of the `LbRouteExtension` resource in the following format: `projects/{project}/locations/{location}/lbRouteExtensions/{lb_route_extension}`. "updateTime": "A String", # Output only. The timestamp when the resource was updated. }, @@ -379,13 +379,13 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbRouteExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). "a_key": "A String", }, - "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. + "loadBalancingScheme": "A String", # Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). "name": "A String", # Required. Name of the `LbRouteExtension` resource in the following format: `projects/{project}/locations/{location}/lbRouteExtensions/{lb_route_extension}`. "updateTime": "A String", # Output only. The timestamp when the resource was updated. } diff --git a/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html b/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html index 50987bde56c..aa58eadf277 100644 --- a/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html +++ b/docs/dyn/networkservices_v1beta1.projects.locations.lbTrafficExtensions.html @@ -136,7 +136,7 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbTrafficExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). @@ -254,7 +254,7 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbTrafficExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). @@ -312,7 +312,7 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbTrafficExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). @@ -379,7 +379,7 @@

Method Details

"name": "A String", # Required. The name for this extension chain. The name is logged as part of the HTTP request logs. The name must conform with RFC-1034, is restricted to lower-cased letters, numbers and hyphens, and can have a maximum length of 63 characters. Additionally, the first character must be a letter and the last a letter or a number. }, ], - "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. + "forwardingRules": [ # Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule. "A String", ], "labels": { # Optional. Set of labels associated with the `LbTrafficExtension` resource. The format must comply with [the following requirements](/compute/docs/labeling-resources#requirements). diff --git a/docs/dyn/notebooks_v1.projects.locations.instances.html b/docs/dyn/notebooks_v1.projects.locations.instances.html index 73cb8009488..6f885bd5307 100644 --- a/docs/dyn/notebooks_v1.projects.locations.instances.html +++ b/docs/dyn/notebooks_v1.projects.locations.instances.html @@ -230,7 +230,7 @@

Method Details

"a_key": "A String", }, "machineType": "A String", # Required. The [Compute Engine machine type](https://cloud.google.com/compute/docs/machine-types) of this instance. - "metadata": { # Custom metadata to apply to this instance. + "metadata": { # Custom metadata to apply to this instance. For example, to specify a Cloud Storage bucket for automatic backup, you can use the `gcs-data-bucket` metadata tag. Format: `"--metadata=gcs-data-bucket=``BUCKET''"`. "a_key": "A String", }, "migrated": True or False, # Output only. Bool indicating whether this notebook has been migrated to a Workbench Instance @@ -470,7 +470,7 @@

Method Details

"a_key": "A String", }, "machineType": "A String", # Required. The [Compute Engine machine type](https://cloud.google.com/compute/docs/machine-types) of this instance. - "metadata": { # Custom metadata to apply to this instance. + "metadata": { # Custom metadata to apply to this instance. For example, to specify a Cloud Storage bucket for automatic backup, you can use the `gcs-data-bucket` metadata tag. Format: `"--metadata=gcs-data-bucket=``BUCKET''"`. "a_key": "A String", }, "migrated": True or False, # Output only. Bool indicating whether this notebook has been migrated to a Workbench Instance @@ -689,7 +689,7 @@

Method Details

"a_key": "A String", }, "machineType": "A String", # Required. The [Compute Engine machine type](https://cloud.google.com/compute/docs/machine-types) of this instance. - "metadata": { # Custom metadata to apply to this instance. + "metadata": { # Custom metadata to apply to this instance. For example, to specify a Cloud Storage bucket for automatic backup, you can use the `gcs-data-bucket` metadata tag. Format: `"--metadata=gcs-data-bucket=``BUCKET''"`. "a_key": "A String", }, "migrated": True or False, # Output only. Bool indicating whether this notebook has been migrated to a Workbench Instance diff --git a/docs/dyn/notebooks_v2.projects.locations.instances.html b/docs/dyn/notebooks_v2.projects.locations.instances.html index def75730ab7..8597fd3985c 100644 --- a/docs/dyn/notebooks_v2.projects.locations.instances.html +++ b/docs/dyn/notebooks_v2.projects.locations.instances.html @@ -92,6 +92,9 @@

Instance Methods

get(name, x__xgafv=None)

Gets details of a single Instance.

+

+ getConfig(name, x__xgafv=None)

+

Gets general backend configurations that might also affect the frontend. Location is required by CCFE. Although we could bypass it to send location-less request directly to the backend job, we would need CPE (go/cloud-cpe). Having the location might also be useful depending on the query.

getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.

@@ -495,6 +498,41 @@

Method Details

}
+
+ getConfig(name, x__xgafv=None) +
Gets general backend configurations that might also affect the frontend. Location is required by CCFE. Although we could bypass it to send location-less request directly to the backend job, we would need CPE (go/cloud-cpe). Having the location might also be useful depending on the query.
+
+Args:
+  name: string, Required. Format: `projects/{project_id}/locations/{location}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for getting WbI configurations in a location
+  "availableImages": [ # Output only. The list of available images to create a WbI.
+    { # ConfigImage represents an image release available to create a WbI
+      "imageName": "A String", # Output only. The name of the image of the form workbench-instances-vYYYYmmdd--
+      "releaseName": "A String", # Output only. The release of the image of the form m123
+    },
+  ],
+  "defaultValues": { # DefaultValues represents the default configuration values. # Output only. The default values for configuration.
+    "machineType": "A String", # Output only. The default machine type used by the backend if not provided by the user.
+  },
+  "supportedValues": { # SupportedValues represents the values supported by the configuration. # Output only. The supported values for configuration.
+    "acceleratorTypes": [ # Output only. The accelerator types supported by WbI.
+      "A String",
+    ],
+    "machineTypes": [ # Output only. The machine types supported by WbI.
+      "A String",
+    ],
+  },
+}
+
+
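A minimal sketch of calling the new getConfig method through the discovery client, assuming Application Default Credentials and a placeholder project and location; the call chain mirrors the resource path documented in this file, and the fields read from the response are the ones listed in the schema above.

from googleapiclient.discovery import build

notebooks = build("notebooks", "v2")

config = (
    notebooks.projects()
    .locations()
    .instances()
    .getConfig(name="projects/my-project/locations/us-central1")
    .execute()
)

# Inspect the backend-reported Workbench Instance configuration described above.
for image in config.get("availableImages", []):
    print(image.get("releaseName"), image.get("imageName"))
print("Default machine type:", config.get("defaultValues", {}).get("machineType"))
print("Supported machine types:", config.get("supportedValues", {}).get("machineTypes"))
print("Supported accelerators:", config.get("supportedValues", {}).get("acceleratorTypes"))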
getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)
Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
diff --git a/docs/dyn/places_v1.places.html b/docs/dyn/places_v1.places.html
index d8b1cb53a8e..770ab99dc13 100644
--- a/docs/dyn/places_v1.places.html
+++ b/docs/dyn/places_v1.places.html
@@ -74,9 +74,20 @@
 
 

Places API (New) . places

Instance Methods

+

+ photos() +

+

Returns the photos Resource.

+

close()

Close httplib2 connections.

+

+ get(name, languageCode=None, regionCode=None, x__xgafv=None)

+

Get place details with a place id (in a name) string.

+

+ searchNearby(body=None, x__xgafv=None)

+

Search for places near locations.

searchText(body=None, x__xgafv=None)

Text query based place search.
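As an illustrative sketch of the new get method listed above, assuming build() is authorized with an API key and using a placeholder place ID; Places API (New) requests typically also need an X-Goog-FieldMask header, which is set here directly on the request object, and the field names in the mask are taken from the response schema documented later in this file.

from googleapiclient.discovery import build

places = build("places", "v1", developerKey="YOUR_API_KEY")

request = places.places().get(
    name="places/ChIJN1t_tDeuEmsRUsoyG83frY4",  # placeholder place ID
    languageCode="en",
)
# Field mask restricting which Place fields are returned.
request.headers["X-Goog-FieldMask"] = "id,displayName,formattedAddress,regularOpeningHours"
place = request.execute()

print(place.get("displayName", {}).get("text"), "-", place.get("formattedAddress"))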

@@ -86,6 +97,776 @@

Method Details

Close httplib2 connections.
+
+ get(name, languageCode=None, regionCode=None, x__xgafv=None) +
Get place details with a place id (in a name) string.
+
+Args:
+  name: string, Required. A place ID returned in a Place (with "places/" prefix), or equivalently the name in the same Place. Format: places/*place_id*. (required)
+  languageCode: string, Optional. Place details will be displayed with the preferred language if available. Current list of supported languages: https://developers.google.com/maps/faq#languagesupport.
+  regionCode: string, Optional. The Unicode country/region code (CLDR) of the location where the request is coming from. This parameter is used to display the place details, like region-specific place name, if available. The parameter can affect results based on applicable law. For more information, see https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. Note that 3-digit region codes are not currently supported.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # All the information representing a Place.
+  "accessibilityOptions": { # Information about the accessibility options a place offers. # Information about the accessibility options a place offers.
+    "wheelchairAccessibleEntrance": True or False, # Places has wheelchair accessible entrance.
+    "wheelchairAccessibleParking": True or False, # Place offers wheelchair accessible parking.
+    "wheelchairAccessibleRestroom": True or False, # Place has wheelchair accessible restroom.
+    "wheelchairAccessibleSeating": True or False, # Place has wheelchair accessible seating.
+  },
+  "addressComponents": [ # Repeated components for each locality level. Note the following facts about the address_components[] array: - The array of address components may contain more components than the formatted_address. - The array does not necessarily include all the political entities that contain an address, apart from those included in the formatted_address. To retrieve all the political entities that contain a specific address, you should use reverse geocoding, passing the latitude/longitude of the address as a parameter to the request. - The format of the response is not guaranteed to remain the same between requests. In particular, the number of address_components varies based on the address requested and can change over time for the same address. A component can change position in the array. The type of the component can change. A particular component may be missing in a later response.
+    { # The structured components that form the formatted address, if this information is available.
+      "languageCode": "A String", # The language used to format this components, in CLDR notation.
+      "longText": "A String", # The full text description or name of the address component. For example, an address component for the country Australia may have a long_name of "Australia".
+      "shortText": "A String", # An abbreviated textual name for the address component, if available. For example, an address component for the country of Australia may have a short_name of "AU".
+      "types": [ # An array indicating the type(s) of the address component.
+        "A String",
+      ],
+    },
+  ],
+  "adrFormatAddress": "A String", # The place's address in adr microformat: http://microformats.org/wiki/adr.
+  "allowsDogs": True or False, # Place allows dogs.
+  "attributions": [ # A set of data provider that must be shown with this result.
+    { # Information about data providers of this place.
+      "provider": "A String", # Name of the Place's data provider.
+      "providerUri": "A String", # URI to the Place's data provider.
+    },
+  ],
+  "businessStatus": "A String", # The business status for the place.
+  "curbsidePickup": True or False, # Specifies if the business supports curbside pickup.
+  "currentOpeningHours": { # Information about business hour of the place. # The hours of operation for the next seven days (including today). The time period starts at midnight on the date of the request and ends at 11:59 pm six days later. This field includes the special_days subfield of all hours, set for dates that have exceptional hours.
+    "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+    "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+      { # A period the place remains in open_now status.
+        "close": { # Status changing points. # The time that the place starts to be closed.
+          "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+            "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+            "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+            "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+          },
+          "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+          "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+          "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+          "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+        },
+        "open": { # Status changing points. # The time that the place starts to be open.
+          "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+            "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+            "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+            "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+          },
+          "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+          "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+          "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+          "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+        },
+      },
+    ],
+    "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+    "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+      { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+        "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+          "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+          "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+          "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+        },
+      },
+    ],
+    "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+      "A String",
+    ],
+  },
+  "currentSecondaryOpeningHours": [ # Contains an array of entries for the next seven days including information about secondary hours of a business. Secondary hours are different from a business's main hours. For example, a restaurant can specify drive through hours or delivery hours as its secondary hours. This field populates the type subfield, which draws from a predefined list of opening hours types (such as DRIVE_THROUGH, PICKUP, or TAKEOUT) based on the types of the place. This field includes the special_days subfield of all hours, set for dates that have exceptional hours.
+    { # Information about business hour of the place.
+      "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+      "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+        { # A period the place remains in open_now status.
+          "close": { # Status changing points. # The time that the place starts to be closed.
+            "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+              "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+              "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+              "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+            },
+            "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+            "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+            "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+            "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+          },
+          "open": { # Status changing points. # The time that the place starts to be open.
+            "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+              "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+              "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+              "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+            },
+            "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+            "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+            "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+            "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+          },
+        },
+      ],
+      "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+      "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+        { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+          "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+            "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+            "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+            "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+          },
+        },
+      ],
+      "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+        "A String",
+      ],
+    },
+  ],
+  "delivery": True or False, # Specifies if the business supports delivery.
+  "dineIn": True or False, # Specifies if the business supports indoor or outdoor seating options.
+  "displayName": { # Localized variant of a text in a particular language. # The localized name of the place, suitable as a short human-readable description. For example, "Google Sydney", "Starbucks", "Pyrmont", etc.
+    "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+    "text": "A String", # Localized string in the language corresponding to language_code below.
+  },
+  "editorialSummary": { # Localized variant of a text in a particular language. # Contains a summary of the place. A summary is comprised of a textual overview, and also includes the language code for these if applicable. Summary text must be presented as-is and can not be modified or altered.
+    "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+    "text": "A String", # Localized string in the language corresponding to language_code below.
+  },
+  "evChargeOptions": { # Information about the EV Charge Station hosted in Place. Terminology follows https://afdc.energy.gov/fuels/electricity_infrastructure.html One port could charge one car at a time. One port has one or more connectors. One station has one or more ports. # Information of ev charging options.
+    "connectorAggregation": [ # A list of EV charging connector aggregations that contain connectors of the same type and same charge rate.
+      { # EV charging information grouped by [type, max_charge_rate_kw]. Shows EV charge aggregation of connectors that have the same type and max charge rate in kw.
+        "availabilityLastUpdateTime": "A String", # The timestamp when the connector availability information in this aggregation was last updated.
+        "availableCount": 42, # Number of connectors in this aggregation that are currently available.
+        "count": 42, # Number of connectors in this aggregation.
+        "maxChargeRateKw": 3.14, # The static max charging rate in kw of each connector in the aggregation.
+        "outOfServiceCount": 42, # Number of connectors in this aggregation that are currently out of service.
+        "type": "A String", # The connector type of this aggregation.
+      },
+    ],
+    "connectorCount": 42, # Number of connectors at this station. However, because some ports can have multiple connectors but only be able to charge one car at a time (e.g.) the number of connectors may be greater than the total number of cars which can charge simultaneously.
+  },
+  "formattedAddress": "A String", # A full, human-readable address for this place.
+  "fuelOptions": { # The most recent information about fuel options in a gas station. This information is updated regularly. # The most recent information about fuel options in a gas station. This information is updated regularly.
+    "fuelPrices": [ # The last known fuel price for each type of fuel this station has. There is one entry per fuel type this station has. Order is not important.
+      { # Fuel price information for a given type.
+        "price": { # Represents an amount of money with its currency type. # The price of the fuel.
+          "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+          "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+          "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+        },
+        "type": "A String", # The type of fuel.
+        "updateTime": "A String", # The time the fuel price was last updated.
+      },
+    ],
+  },
+  "goodForChildren": True or False, # Place is good for children.
+  "goodForGroups": True or False, # Place accommodates groups.
+  "goodForWatchingSports": True or False, # Place is suitable for watching sports.
+  "googleMapsUri": "A String", # A URL providing more information about this place.
+  "iconBackgroundColor": "A String", # Background color for icon_mask in hex format, e.g. #909CE1.
+  "iconMaskBaseUri": "A String", # A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png").
+  "id": "A String", # The unique identifier of a place.
+  "internationalPhoneNumber": "A String", # A human-readable phone number for the place, in international format.
+  "liveMusic": True or False, # Place provides live music.
+  "location": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # The position of this place.
+    "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+    "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+  },
+  "menuForChildren": True or False, # Place has a children's menu.
+  "name": "A String", # An ID representing this place which may be used to look up this place again (a.k.a. the API "resource" name: places/place_id).
+  "nationalPhoneNumber": "A String", # A human-readable phone number for the place, in national format.
+  "outdoorSeating": True or False, # Place provides outdoor seating.
+  "parkingOptions": { # Information about parking options for the place. A parking lot could support more than one option at the same time. # Options of parking provided by the place.
+    "freeGarageParking": True or False, # Place offers free garage parking.
+    "freeParkingLot": True or False, # Place offers free parking lots.
+    "freeStreetParking": True or False, # Place offers free street parking.
+    "paidGarageParking": True or False, # Place offers paid garage parking.
+    "paidParkingLot": True or False, # Place offers paid parking lots.
+    "paidStreetParking": True or False, # Place offers paid street parking.
+    "valetParking": True or False, # Place offers valet parking.
+  },
+  "paymentOptions": { # Payment options the place accepts. # Payment options the place accepts. If a payment option data is not available, the payment option field will be unset.
+    "acceptsCashOnly": True or False, # Place accepts cash only as payment. Places with this attribute may still accept other payment methods.
+    "acceptsCreditCards": True or False, # Place accepts credit cards as payment.
+    "acceptsDebitCards": True or False, # Place accepts debit cards as payment.
+    "acceptsNfc": True or False, # Place accepts NFC payments.
+  },
+  "photos": [ # Information (including references) about photos of this place.
+    { # Information about a photo of a place.
+      "authorAttributions": [ # This photo's authors.
+        { # Information about the author of the UGC data. Used in Photo, and Review.
+          "displayName": "A String", # Name of the author of the Photo or Review.
+          "photoUri": "A String", # Profile photo URI of the author of the Photo or Review.
+          "uri": "A String", # URI of the author of the Photo or Review.
+        },
+      ],
+      "heightPx": 42, # The maximum available height, in pixels.
+      "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (a.k.a. the API "resource" name: places/{place_id}/photos/{photo}).
+      "widthPx": 42, # The maximum available width, in pixels.
+    },
+  ],
+  "plusCode": { # Plus code (http://plus.codes) is a location reference with two formats: global code defining a 14mx14m (1/8000th of a degree) or smaller rectangle, and compound code, replacing the prefix with a reference location. # Plus code of the place location lat/long.
+    "compoundCode": "A String", # Place's compound code, such as "33GV+HQ, Ramberg, Norway", containing the suffix of the global code and replacing the prefix with a formatted name of a reference entity.
+    "globalCode": "A String", # Place's global (full) code, such as "9FWM33GV+HQ", representing an 1/8000 by 1/8000 degree area (~14 by 14 meters).
+  },
+  "priceLevel": "A String", # Price level of the place.
+  "primaryType": "A String", # The primary type of the given result. This type must one of the Places API supported types. For example, "restaurant", "cafe", "airport", etc. A place can only have a single primary type. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types
+  "primaryTypeDisplayName": { # Localized variant of a text in a particular language. # The display name of the primary type, localized to the request language if applicable. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types
+    "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+    "text": "A String", # Localized string in the language corresponding to language_code below.
+  },
+  "rating": 3.14, # A rating between 1.0 and 5.0, based on user reviews of this place.
+  "regularOpeningHours": { # Information about business hour of the place. # The regular hours of operation.
+    "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+    "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+      { # A period the place remains in open_now status.
+        "close": { # Status changing points. # The time that the place starts to be closed.
+          "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+            "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+            "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+            "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+          },
+          "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+          "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+          "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+          "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+        },
+        "open": { # Status changing points. # The time that the place starts to be open.
+          "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+            "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+            "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+            "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+          },
+          "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+          "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+          "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+          "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+        },
+      },
+    ],
+    "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+    "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+      { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+        "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+          "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+          "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+          "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+        },
+      },
+    ],
+    "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+      "A String",
+    ],
+  },
+  "regularSecondaryOpeningHours": [ # Contains an array of entries for information about regular secondary hours of a business. Secondary hours are different from a business's main hours. For example, a restaurant can specify drive through hours or delivery hours as its secondary hours. This field populates the type subfield, which draws from a predefined list of opening hours types (such as DRIVE_THROUGH, PICKUP, or TAKEOUT) based on the types of the place.
+    { # Information about business hours of the place.
+      "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+      "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+        { # A period the place remains in open_now status.
+          "close": { # Status changing points. # The time that the place starts to be closed.
+            "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+              "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+              "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+              "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+            },
+            "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+            "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+            "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+            "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+          },
+          "open": { # Status changing points. # The time that the place starts to be open.
+            "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+              "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+              "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+              "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+            },
+            "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+            "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+            "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+            "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+          },
+        },
+      ],
+      "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+      "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+        { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+          "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+            "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+            "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+            "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+          },
+        },
+      ],
+      "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+        "A String",
+      ],
+    },
+  ],
+  "reservable": True or False, # Specifies if the place supports reservations.
+  "restroom": True or False, # Place has restroom.
+  "reviews": [ # List of reviews about this place, sorted by relevance.
+    { # Information about a review of a place.
+      "authorAttribution": { # Information about the author of the UGC data. Used in Photo, and Review. # This review's author.
+        "displayName": "A String", # Name of the author of the Photo or Review.
+        "photoUri": "A String", # Profile photo URI of the author of the Photo or Review.
+        "uri": "A String", # URI of the author of the Photo or Review.
+      },
+      "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: places/place_id/reviews/review).
+      "originalText": { # Localized variant of a text in a particular language. # The review text in its original language.
+        "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+        "text": "A String", # Localized string in the language corresponding to language_code below.
+      },
+      "publishTime": "A String", # Timestamp for the review.
+      "rating": 3.14, # A number between 1.0 and 5.0, also called the number of stars.
+      "relativePublishTimeDescription": "A String", # A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.
+      "text": { # Localized variant of a text in a particular language. # The localized text of the review.
+        "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+        "text": "A String", # Localized string in the language corresponding to language_code below.
+      },
+    },
+  ],
+  "servesBeer": True or False, # Specifies if the place serves beer.
+  "servesBreakfast": True or False, # Specifies if the place serves breakfast.
+  "servesBrunch": True or False, # Specifies if the place serves brunch.
+  "servesCocktails": True or False, # Place serves cocktails.
+  "servesCoffee": True or False, # Place serves coffee.
+  "servesDessert": True or False, # Place serves dessert.
+  "servesDinner": True or False, # Specifies if the place serves dinner.
+  "servesLunch": True or False, # Specifies if the place serves lunch.
+  "servesVegetarianFood": True or False, # Specifies if the place serves vegetarian food.
+  "servesWine": True or False, # Specifies if the place serves wine.
+  "shortFormattedAddress": "A String", # A short, human-readable address for this place.
+  "subDestinations": [ # A list of sub destinations related to the place.
+    { # Place resource name and id of sub destinations that relate to the place. For example, different terminals are different destinations of an airport.
+      "id": "A String", # The place id of the sub destination.
+      "name": "A String", # The resource name of the sub destination.
+    },
+  ],
+  "takeout": True or False, # Specifies if the business supports takeout.
+  "types": [ # A set of type tags for this result. For example, "political" and "locality". For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types
+    "A String",
+  ],
+  "userRatingCount": 42, # The total number of reviews (with or without text) for this place.
+  "utcOffsetMinutes": 42, # Number of minutes this place's timezone is currently offset from UTC. This is expressed in minutes to support timezones that are offset by fractions of an hour, e.g. X hours and 15 minutes.
+  "viewport": { # A latitude-longitude viewport, represented as two diagonally opposite `low` and `high` points. A viewport is considered a closed region, i.e. it includes its boundary. The latitude bounds must range between -90 to 90 degrees inclusive, and the longitude bounds must range between -180 to 180 degrees inclusive. Various cases include: - If `low` = `high`, the viewport consists of that single point. - If `low.longitude` > `high.longitude`, the longitude range is inverted (the viewport crosses the 180 degree longitude line). - If `low.longitude` = -180 degrees and `high.longitude` = 180 degrees, the viewport includes all longitudes. - If `low.longitude` = 180 degrees and `high.longitude` = -180 degrees, the longitude range is empty. - If `low.latitude` > `high.latitude`, the latitude range is empty. Both `low` and `high` must be populated, and the represented box cannot be empty (as specified by the definitions above). An empty viewport will result in an error. For example, this viewport fully encloses New York City: { "low": { "latitude": 40.477398, "longitude": -74.259087 }, "high": { "latitude": 40.91618, "longitude": -73.70018 } } # A viewport suitable for displaying the place on an average-sized map.
+    "high": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # Required. The high point of the viewport.
+      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+    },
+    "low": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # Required. The low point of the viewport.
+      "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+      "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+    },
+  },
+  "websiteUri": "A String", # The authoritative website for this place, e.g. a business' homepage. Note that for places that are part of a chain (e.g. an IKEA store), this will usually be the website for the individual store, not the overall chain.
+}
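A minimal sketch of consuming a place dict of the shape documented above. The field names follow the schema, every field is treated as optional, and the helper name is illustrative only; none of this is part of the generated documentation.

# Sketch only: summarize selected fields of a place dict shaped like the
# schema documented above. All fields are optional, so use .get() throughout.
def summarize_place(place):
    display = place.get("displayName", {}).get("text", "unknown place")
    loc = place.get("location", {})
    lat, lng = loc.get("latitude"), loc.get("longitude")
    rating = place.get("rating")                     # 1.0-5.0, may be absent
    count = place.get("userRatingCount", 0)          # total number of reviews
    hours = place.get("regularOpeningHours", {}).get("weekdayDescriptions", [])
    lines = [f"{display} ({lat}, {lng})"]
    if rating is not None:
        lines.append(f"rating {rating} from {count} reviews")
    lines.extend(hours)
    return "\n".join(lines)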
+
+searchNearby(body=None, x__xgafv=None)
Search for places near locations.
+
+Args:
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request proto for Search Nearby.
+  "excludedPrimaryTypes": [ # Excluded primary Place type (e.g. "restaurant" or "gas_station") from https://developers.google.com/maps/documentation/places/web-service/place-types. If there are any conflicting primary types, i.e. a type appears in both included_primary_types and excluded_primary_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = ["restaurant"]}, the returned places provide "restaurant" related services but do not operate primarily as "restaurants".
+    "A String",
+  ],
+  "excludedTypes": [ # Excluded Place type (eg, "restaurant" or "gas_station") from https://developers.google.com/maps/documentation/places/web-service/place-types. If the client provides both included_types (e.g. restaurant) and excluded_types (e.g. cafe), then the response should include places that are restaurant but not cafe. The response includes places that match at least one of the included_types and none of the excluded_types. If there are any conflicting types, i.e. a type appears in both included_types and excluded_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = ["restaurant"]}, the returned places provide "restaurant" related services but do not operate primarily as "restaurants".
+    "A String",
+  ],
+  "includedPrimaryTypes": [ # Included primary Place type (e.g. "restaurant" or "gas_station") from https://developers.google.com/maps/documentation/places/web-service/place-types. A place can only have a single primary type from the supported types table associated with it. If there are any conflicting primary types, i.e. a type appears in both included_primary_types and excluded_primary_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = ["restaurant"]}, the returned places provide "restaurant" related services but do not operate primarily as "restaurants".
+    "A String",
+  ],
+  "includedTypes": [ # Included Place type (eg, "restaurant" or "gas_station") from https://developers.google.com/maps/documentation/places/web-service/place-types. If there are any conflicting types, i.e. a type appears in both included_types and excluded_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = ["restaurant"], excluded_primary_types = ["restaurant"]}, the returned places provide "restaurant" related services but do not operate primarily as "restaurants".
+    "A String",
+  ],
+  "languageCode": "A String", # Place details will be displayed with the preferred language if available. If the language code is unspecified or unrecognized, place details of any language may be returned, with a preference for English if such details exist. Current list of supported languages: https://developers.google.com/maps/faq#languagesupport.
+  "locationRestriction": { # The region to search. # Required. The region to search.
+    "circle": { # Circle with a LatLng as center and radius. # A circle defined by center point and radius.
+      "center": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # Required. Center latitude and longitude. The range of latitude must be within [-90.0, 90.0]. The range of the longitude must be within [-180.0, 180.0].
+        "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+        "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+      },
+      "radius": 3.14, # Required. Radius measured in meters. The radius must be within [0.0, 50000.0].
+    },
+  },
+  "maxResultCount": 42, # Maximum number of results to return. It must be between 1 and 20 (default), inclusively. If the number is unset, it falls back to the upper limit. If the number is set to negative or exceeds the upper limit, an INVALID_ARGUMENT error is returned.
+  "rankPreference": "A String", # How results will be ranked in the response.
+  "regionCode": "A String", # The Unicode country/region code (CLDR) of the location where the request is coming from. This parameter is used to display the place details, like region-specific place name, if available. The parameter can affect results based on applicable law. For more information, see https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. Note that 3-digit region codes are not currently supported.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
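A minimal sketch of invoking this method through the discovery-based client, using a request body of the form documented above. The build() arguments, the places() resource path, the API-key authentication, and the POPULARITY rank value are assumptions; adjust them to your environment (depending on the API's requirements, a response field mask may also be needed).

# Sketch only: call searchNearby with a request body matching the schema above.
from googleapiclient.discovery import build

service = build("places", "v1", developerKey="YOUR_API_KEY")  # hypothetical key

request_body = {
    "includedTypes": ["restaurant"],
    "maxResultCount": 10,                      # must be between 1 and 20
    "rankPreference": "POPULARITY",            # assumed enum value
    "locationRestriction": {
        "circle": {
            "center": {"latitude": 40.7580, "longitude": -73.9855},
            "radius": 500.0,                   # meters, within [0.0, 50000.0]
        }
    },
}

response = service.places().searchNearby(body=request_body).execute()
for place in response.get("places", []):
    print(place.get("displayName", {}).get("text"),
          place.get("formattedAddress"))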
+Returns:
+  An object of the form:
+
+    { # Response proto for Search Nearby.
+  "places": [ # A list of places that meets user's requirements like places types, number of places and specific location restriction.
+    { # All the information representing a Place.
+      "accessibilityOptions": { # Information about the accessibility options a place offers. # Information about the accessibility options a place offers.
+        "wheelchairAccessibleEntrance": True or False, # Places has wheelchair accessible entrance.
+        "wheelchairAccessibleParking": True or False, # Place offers wheelchair accessible parking.
+        "wheelchairAccessibleRestroom": True or False, # Place has wheelchair accessible restroom.
+        "wheelchairAccessibleSeating": True or False, # Place has wheelchair accessible seating.
+      },
+      "addressComponents": [ # Repeated components for each locality level. Note the following facts about the address_components[] array: - The array of address components may contain more components than the formatted_address. - The array does not necessarily include all the political entities that contain an address, apart from those included in the formatted_address. To retrieve all the political entities that contain a specific address, you should use reverse geocoding, passing the latitude/longitude of the address as a parameter to the request. - The format of the response is not guaranteed to remain the same between requests. In particular, the number of address_components varies based on the address requested and can change over time for the same address. A component can change position in the array. The type of the component can change. A particular component may be missing in a later response.
+        { # The structured components that form the formatted address, if this information is available.
+          "languageCode": "A String", # The language used to format this components, in CLDR notation.
+          "longText": "A String", # The full text description or name of the address component. For example, an address component for the country Australia may have a long_name of "Australia".
+          "shortText": "A String", # An abbreviated textual name for the address component, if available. For example, an address component for the country of Australia may have a short_name of "AU".
+          "types": [ # An array indicating the type(s) of the address component.
+            "A String",
+          ],
+        },
+      ],
+      "adrFormatAddress": "A String", # The place's address in adr microformat: http://microformats.org/wiki/adr.
+      "allowsDogs": True or False, # Place allows dogs.
+      "attributions": [ # A set of data provider that must be shown with this result.
+        { # Information about data providers of this place.
+          "provider": "A String", # Name of the Place's data provider.
+          "providerUri": "A String", # URI to the Place's data provider.
+        },
+      ],
+      "businessStatus": "A String", # The business status for the place.
+      "curbsidePickup": True or False, # Specifies if the business supports curbside pickup.
+      "currentOpeningHours": { # Information about business hour of the place. # The hours of operation for the next seven days (including today). The time period starts at midnight on the date of the request and ends at 11:59 pm six days later. This field includes the special_days subfield of all hours, set for dates that have exceptional hours.
+        "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+        "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+          { # A period the place remains in open_now status.
+            "close": { # Status changing points. # The time that the place starts to be closed.
+              "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+              },
+              "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+              "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+              "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+              "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+            },
+            "open": { # Status changing points. # The time that the place starts to be open.
+              "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+              },
+              "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+              "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+              "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+              "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+            },
+          },
+        ],
+        "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+        "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+          { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+            "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+              "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+              "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+              "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+            },
+          },
+        ],
+        "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+          "A String",
+        ],
+      },
+      "currentSecondaryOpeningHours": [ # Contains an array of entries for the next seven days including information about secondary hours of a business. Secondary hours are different from a business's main hours. For example, a restaurant can specify drive through hours or delivery hours as its secondary hours. This field populates the type subfield, which draws from a predefined list of opening hours types (such as DRIVE_THROUGH, PICKUP, or TAKEOUT) based on the types of the place. This field includes the special_days subfield of all hours, set for dates that have exceptional hours.
+        { # Information about business hours of the place.
+          "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+          "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+            { # A period the place remains in open_now status.
+              "close": { # Status changing points. # The time that the place starts to be closed.
+                "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                  "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                  "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                  "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+                },
+                "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+                "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+                "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+                "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+              },
+              "open": { # Status changing points. # The time that the place starts to be open.
+                "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                  "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                  "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                  "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+                },
+                "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+                "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+                "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+                "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+              },
+            },
+          ],
+          "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+          "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+            { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+              "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+                "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+              },
+            },
+          ],
+          "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+            "A String",
+          ],
+        },
+      ],
+      "delivery": True or False, # Specifies if the business supports delivery.
+      "dineIn": True or False, # Specifies if the business supports indoor or outdoor seating options.
+      "displayName": { # Localized variant of a text in a particular language. # The localized name of the place, suitable as a short human-readable description. For example, "Google Sydney", "Starbucks", "Pyrmont", etc.
+        "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+        "text": "A String", # Localized string in the language corresponding to language_code below.
+      },
+      "editorialSummary": { # Localized variant of a text in a particular language. # Contains a summary of the place. A summary is comprised of a textual overview, and also includes the language code for these if applicable. Summary text must be presented as-is and can not be modified or altered.
+        "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+        "text": "A String", # Localized string in the language corresponding to language_code below.
+      },
+      "evChargeOptions": { # Information about the EV Charge Station hosted in Place. Terminology follows https://afdc.energy.gov/fuels/electricity_infrastructure.html One port could charge one car at a time. One port has one or more connectors. One station has one or more ports. # Information of ev charging options.
+        "connectorAggregation": [ # A list of EV charging connector aggregations that contain connectors of the same type and same charge rate.
+          { # EV charging information grouped by [type, max_charge_rate_kw]. Shows EV charge aggregation of connectors that have the same type and max charge rate in kw.
+            "availabilityLastUpdateTime": "A String", # The timestamp when the connector availability information in this aggregation was last updated.
+            "availableCount": 42, # Number of connectors in this aggregation that are currently available.
+            "count": 42, # Number of connectors in this aggregation.
+            "maxChargeRateKw": 3.14, # The static max charging rate in kw of each connector in the aggregation.
+            "outOfServiceCount": 42, # Number of connectors in this aggregation that are currently out of service.
+            "type": "A String", # The connector type of this aggregation.
+          },
+        ],
+        "connectorCount": 42, # Number of connectors at this station. However, because some ports can have multiple connectors but only be able to charge one car at a time (e.g.) the number of connectors may be greater than the total number of cars which can charge simultaneously.
+      },
+      "formattedAddress": "A String", # A full, human-readable address for this place.
+      "fuelOptions": { # The most recent information about fuel options in a gas station. This information is updated regularly. # The most recent information about fuel options in a gas station. This information is updated regularly.
+        "fuelPrices": [ # The last known fuel price for each type of fuel this station has. There is one entry per fuel type this station has. Order is not important.
+          { # Fuel price information for a given type.
+            "price": { # Represents an amount of money with its currency type. # The price of the fuel.
+              "currencyCode": "A String", # The three-letter currency code defined in ISO 4217.
+              "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.
+              "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar.
+            },
+            "type": "A String", # The type of fuel.
+            "updateTime": "A String", # The time the fuel price was last updated.
+          },
+        ],
+      },
+      "goodForChildren": True or False, # Place is good for children.
+      "goodForGroups": True or False, # Place accommodates groups.
+      "goodForWatchingSports": True or False, # Place is suitable for watching sports.
+      "googleMapsUri": "A String", # A URL providing more information about this place.
+      "iconBackgroundColor": "A String", # Background color for icon_mask in hex format, e.g. #909CE1.
+      "iconMaskBaseUri": "A String", # A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png").
+      "id": "A String", # The unique identifier of a place.
+      "internationalPhoneNumber": "A String", # A human-readable phone number for the place, in international format.
+      "liveMusic": True or False, # Place provides live music.
+      "location": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # The position of this place.
+        "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+        "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+      },
+      "menuForChildren": True or False, # Place has a children's menu.
+      "name": "A String", # An ID representing this place which may be used to look up this place again (a.k.a. the API "resource" name: places/place_id).
+      "nationalPhoneNumber": "A String", # A human-readable phone number for the place, in national format.
+      "outdoorSeating": True or False, # Place provides outdoor seating.
+      "parkingOptions": { # Information about parking options for the place. A parking lot could support more than one option at the same time. # Options of parking provided by the place.
+        "freeGarageParking": True or False, # Place offers free garage parking.
+        "freeParkingLot": True or False, # Place offers free parking lots.
+        "freeStreetParking": True or False, # Place offers free street parking.
+        "paidGarageParking": True or False, # Place offers paid garage parking.
+        "paidParkingLot": True or False, # Place offers paid parking lots.
+        "paidStreetParking": True or False, # Place offers paid street parking.
+        "valetParking": True or False, # Place offers valet parking.
+      },
+      "paymentOptions": { # Payment options the place accepts. # Payment options the place accepts. If a payment option data is not available, the payment option field will be unset.
+        "acceptsCashOnly": True or False, # Place accepts cash only as payment. Places with this attribute may still accept other payment methods.
+        "acceptsCreditCards": True or False, # Place accepts credit cards as payment.
+        "acceptsDebitCards": True or False, # Place accepts debit cards as payment.
+        "acceptsNfc": True or False, # Place accepts NFC payments.
+      },
+      "photos": [ # Information (including references) about photos of this place.
+        { # Information about a photo of a place.
+          "authorAttributions": [ # This photo's authors.
+            { # Information about the author of the UGC data. Used in Photo, and Review.
+              "displayName": "A String", # Name of the author of the Photo or Review.
+              "photoUri": "A String", # Profile photo URI of the author of the Photo or Review.
+              "uri": "A String", # URI of the author of the Photo or Review.
+            },
+          ],
+          "heightPx": 42, # The maximum available height, in pixels.
+          "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (a.k.a. the API "resource" name: places/{place_id}/photos/{photo}).
+          "widthPx": 42, # The maximum available width, in pixels.
+        },
+      ],
+      "plusCode": { # Plus code (http://plus.codes) is a location reference with two formats: global code defining a 14mx14m (1/8000th of a degree) or smaller rectangle, and compound code, replacing the prefix with a reference location. # Plus code of the place location lat/long.
+        "compoundCode": "A String", # Place's compound code, such as "33GV+HQ, Ramberg, Norway", containing the suffix of the global code and replacing the prefix with a formatted name of a reference entity.
+        "globalCode": "A String", # Place's global (full) code, such as "9FWM33GV+HQ", representing an 1/8000 by 1/8000 degree area (~14 by 14 meters).
+      },
+      "priceLevel": "A String", # Price level of the place.
+      "primaryType": "A String", # The primary type of the given result. This type must one of the Places API supported types. For example, "restaurant", "cafe", "airport", etc. A place can only have a single primary type. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types
+      "primaryTypeDisplayName": { # Localized variant of a text in a particular language. # The display name of the primary type, localized to the request language if applicable. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types
+        "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+        "text": "A String", # Localized string in the language corresponding to language_code below.
+      },
+      "rating": 3.14, # A rating between 1.0 and 5.0, based on user reviews of this place.
+      "regularOpeningHours": { # Information about business hour of the place. # The regular hours of operation.
+        "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+        "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+          { # A period the place remains in open_now status.
+            "close": { # Status changing points. # The time that the place starts to be closed.
+              "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+              },
+              "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+              "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+              "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+              "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+            },
+            "open": { # Status changing points. # The time that the place starts to be open.
+              "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+              },
+              "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+              "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+              "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+              "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+            },
+          },
+        ],
+        "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+        "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+          { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+            "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+              "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+              "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+              "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+            },
+          },
+        ],
+        "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+          "A String",
+        ],
+      },
+      "regularSecondaryOpeningHours": [ # Contains an array of entries for information about regular secondary hours of a business. Secondary hours are different from a business's main hours. For example, a restaurant can specify drive through hours or delivery hours as its secondary hours. This field populates the type subfield, which draws from a predefined list of opening hours types (such as DRIVE_THROUGH, PICKUP, or TAKEOUT) based on the types of the place.
+        { # Information about business hour of the place.
+          "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours.
+          "periods": [ # The periods that this place is open during the week. The periods are in chronological order, starting with Sunday in the place-local timezone. An empty (but not absent) value indicates a place that is never open, e.g. because it is closed temporarily for renovations.
+            { # A period the place remains in open_now status.
+              "close": { # Status changing points. # The time that the place starts to be closed.
+                "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                  "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                  "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                  "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+                },
+                "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+                "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+                "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+                "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+              },
+              "open": { # Status changing points. # The time that the place starts to be open.
+                "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # Date in the local timezone for the place.
+                  "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                  "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                  "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+                },
+                "day": 42, # A day of the week, as an integer in the range 0-6. 0 is Sunday, 1 is Monday, etc.
+                "hour": 42, # The hour in 2 digits. Ranges from 00 to 23.
+                "minute": 42, # The minute in 2 digits. Ranges from 00 to 59.
+                "truncated": True or False, # Whether or not this endpoint was truncated. Truncation occurs when the real hours are outside the times we are willing to return hours between, so we truncate the hours back to these boundaries. This ensures that at most 24 * 7 hours from midnight of the day of the request are returned.
+              },
+            },
+          ],
+          "secondaryHoursType": "A String", # A type string used to identify the type of secondary hours.
+          "specialDays": [ # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day. Set for current_opening_hours and current_secondary_opening_hours if there are exceptional hours.
+            { # Structured information for special days that fall within the period that the returned opening hours cover. Special days are days that could impact the business hours of a place, e.g. Christmas day.
+              "date": { # Represents a whole or partial calendar date, such as a birthday. The time of day and time zone are either specified elsewhere or are insignificant. The date is relative to the Gregorian Calendar. This can represent one of the following: * A full date, with non-zero year, month, and day values. * A month and day, with a zero year (for example, an anniversary). * A year on its own, with a zero month and a zero day. * A year and month, with a zero day (for example, a credit card expiration date). Related types: * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp # The date of this special day.
+                "day": 42, # Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 to specify a year by itself or a year and month where the day isn't significant.
+                "month": 42, # Month of a year. Must be from 1 to 12, or 0 to specify a year without a month and day.
+                "year": 42, # Year of the date. Must be from 1 to 9999, or 0 to specify a date without a year.
+              },
+            },
+          ],
+          "weekdayDescriptions": [ # Localized strings describing the opening hours of this place, one string for each day of the week. Will be empty if the hours are unknown or could not be converted to localized text. Example: "Sun: 18:00–06:00"
+            "A String",
+          ],
+        },
+      ],
+      "reservable": True or False, # Specifies if the place supports reservations.
+      "restroom": True or False, # Place has restroom.
+      "reviews": [ # List of reviews about this place, sorted by relevance.
+        { # Information about a review of a place.
+          "authorAttribution": { # Information about the author of the UGC data. Used in Photo, and Review. # This review's author.
+            "displayName": "A String", # Name of the author of the Photo or Review.
+            "photoUri": "A String", # Profile photo URI of the author of the Photo or Review.
+            "uri": "A String", # URI of the author of the Photo or Review.
+          },
+          "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: places/place_id/reviews/review).
+          "originalText": { # Localized variant of a text in a particular language. # The review text in its original language.
+            "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+            "text": "A String", # Localized string in the language corresponding to language_code below.
+          },
+          "publishTime": "A String", # Timestamp for the review.
+          "rating": 3.14, # A number between 1.0 and 5.0, also called the number of stars.
+          "relativePublishTimeDescription": "A String", # A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.
+          "text": { # Localized variant of a text in a particular language. # The localized text of the review.
+            "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
+            "text": "A String", # Localized string in the language corresponding to language_code below.
+          },
+        },
+      ],
+      "servesBeer": True or False, # Specifies if the place serves beer.
+      "servesBreakfast": True or False, # Specifies if the place serves breakfast.
+      "servesBrunch": True or False, # Specifies if the place serves brunch.
+      "servesCocktails": True or False, # Place serves cocktails.
+      "servesCoffee": True or False, # Place serves coffee.
+      "servesDessert": True or False, # Place serves dessert.
+      "servesDinner": True or False, # Specifies if the place serves dinner.
+      "servesLunch": True or False, # Specifies if the place serves lunch.
+      "servesVegetarianFood": True or False, # Specifies if the place serves vegetarian food.
+      "servesWine": True or False, # Specifies if the place serves wine.
+      "shortFormattedAddress": "A String", # A short, human-readable address for this place.
+      "subDestinations": [ # A list of sub destinations related to the place.
+        { # Place resource name and id of sub destinations that relate to the place. For example, different terminals are different destinations of an airport.
+          "id": "A String", # The place id of the sub destination.
+          "name": "A String", # The resource name of the sub destination.
+        },
+      ],
+      "takeout": True or False, # Specifies if the business supports takeout.
+      "types": [ # A set of type tags for this result. For example, "political" and "locality". For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types
+        "A String",
+      ],
+      "userRatingCount": 42, # The total number of reviews (with or without text) for this place.
+      "utcOffsetMinutes": 42, # Number of minutes this place's timezone is currently offset from UTC. This is expressed in minutes to support timezones that are offset by fractions of an hour, e.g. X hours and 15 minutes.
+      "viewport": { # A latitude-longitude viewport, represented as two diagonally opposite `low` and `high` points. A viewport is considered a closed region, i.e. it includes its boundary. The latitude bounds must range between -90 to 90 degrees inclusive, and the longitude bounds must range between -180 to 180 degrees inclusive. Various cases include: - If `low` = `high`, the viewport consists of that single point. - If `low.longitude` > `high.longitude`, the longitude range is inverted (the viewport crosses the 180 degree longitude line). - If `low.longitude` = -180 degrees and `high.longitude` = 180 degrees, the viewport includes all longitudes. - If `low.longitude` = 180 degrees and `high.longitude` = -180 degrees, the longitude range is empty. - If `low.latitude` > `high.latitude`, the latitude range is empty. Both `low` and `high` must be populated, and the represented box cannot be empty (as specified by the definitions above). An empty viewport will result in an error. For example, this viewport fully encloses New York City: { "low": { "latitude": 40.477398, "longitude": -74.259087 }, "high": { "latitude": 40.91618, "longitude": -73.70018 } } # A viewport suitable for displaying the place on an average-sized map.
+        "high": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # Required. The high point of the viewport.
+          "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+          "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+        },
+        "low": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # Required. The low point of the viewport.
+          "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0].
+          "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0].
+        },
+      },
+      "websiteUri": "A String", # The authoritative website for this place, e.g. a business' homepage. Note that for places that are part of a chain (e.g. an IKEA store), this will usually be the website for the individual store, not the overall chain.
+    },
+  ],
+}
+
+
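The Place payload above is what the search methods in this file return. Below is a minimal sketch of driving searchText with the new request options (maxResultCount, openNow, minRating) and reading the opening-hours fields back. It assumes an API key with the Places API (New) enabled and that the standard `fields` query parameter is honored as the response field mask; all names and values are placeholders.

# Assumptions: API key with Places API (New) enabled; `fields` accepted as the field mask.
from googleapiclient.discovery import build

places = build("places", "v1", developerKey="YOUR_API_KEY")  # placeholder key

body = {
    "textQuery": "coffee shops in Mountain View",  # placeholder query
    "maxResultCount": 5,   # 1-20; defaults to 20 when unset
    "openNow": True,       # only places that are currently open
    "minRating": 4.0,      # drop results rated below 4.0
}

response = places.places().searchText(
    body=body,
    fields="places.displayName,places.rating,places.regularOpeningHours",
).execute()

for place in response.get("places", []):
    print(place.get("displayName", {}).get("text"), place.get("rating"))
    for line in place.get("regularOpeningHours", {}).get("weekdayDescriptions", []):
        print("  ", line)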
searchText(body=None, x__xgafv=None)
Text query based place search.
@@ -128,9 +909,9 @@ 

Method Details

            },
          },
        },
-    "maxResultCount": 42, # Maximum number of results to return. It must be between 1 and 20, inclusively. If the number is unset, it falls back to the upper limit. If the number is set to negative or exceeds the upper limit, an INVALID_ARGUMENT error is returned.
+    "maxResultCount": 42, # Maximum number of results to return. It must be between 1 and 20, inclusively. The default is 20. If the number is unset, it falls back to the upper limit. If the number is set to negative or exceeds the upper limit, an INVALID_ARGUMENT error is returned.
     "minRating": 3.14, # Filter out results whose average user rating is strictly less than this limit. A valid value must be a float between 0 and 5 (inclusively) at a 0.5 cadence, i.e. [0, 0.5, 1.0, ..., 5.0] inclusively. This is to keep parity with LocalRefinement_UserRating. The input rating will round up to the nearest 0.5 (ceiling). For instance, a rating of 0.6 will eliminate all results with a rating of less than 1.0.
-    "openNow": True or False, # Used to restrict the search to places that are currently open.
+    "openNow": True or False, # Used to restrict the search to places that are currently open. The default is false.
     "priceLevels": [ # Used to restrict the search to places that are marked as certain price levels. Users can choose any combination of price levels. Defaults to selecting all price levels.
       "A String",
     ],
@@ -153,6 +934,9 @@

Method Details

{ # All the information representing a Place. "accessibilityOptions": { # Information about the accessibility options a place offers. # Information about the accessibility options a place offers. "wheelchairAccessibleEntrance": True or False, # Places has wheelchair accessible entrance. + "wheelchairAccessibleParking": True or False, # Place offers wheelchair accessible parking. + "wheelchairAccessibleRestroom": True or False, # Place has wheelchair accessible restroom. + "wheelchairAccessibleSeating": True or False, # Place has wheelchair accessible seating. }, "addressComponents": [ # Repeated components for each locality level. Note the following facts about the address_components[] array: - The array of address components may contain more components than the formatted_address. - The array does not necessarily include all the political entities that contain an address, apart from those included in the formatted_address. To retrieve all the political entities that contain a specific address, you should use reverse geocoding, passing the latitude/longitude of the address as a parameter to the request. - The format of the response is not guaranteed to remain the same between requests. In particular, the number of address_components varies based on the address requested and can change over time for the same address. A component can change position in the array. The type of the component can change. A particular component may be missing in a later response. { # The structured components that form the formatted address, if this information is available. @@ -165,6 +949,7 @@

Method Details

}, ], "adrFormatAddress": "A String", # The place's address in adr microformat: http://microformats.org/wiki/adr. + "allowsDogs": True or False, # Place allows dogs. "attributions": [ # A set of data provider that must be shown with this result. { # Information about data providers of this place. "provider": "A String", # Name of the Place's data provider. @@ -269,23 +1054,89 @@

Method Details

"languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, + "evChargeOptions": { # Information about the EV Charge Station hosted in Place. Terminology follows https://afdc.energy.gov/fuels/electricity_infrastructure.html One port could charge one car at a time. One port has one or more connectors. One station has one or more ports. # Information of ev charging options. + "connectorAggregation": [ # A list of EV charging connector aggregations that contain connectors of the same type and same charge rate. + { # EV charging information grouped by [type, max_charge_rate_kw]. Shows EV charge aggregation of connectors that have the same type and max charge rate in kw. + "availabilityLastUpdateTime": "A String", # The timestamp when the connector availability information in this aggregation was last updated. + "availableCount": 42, # Number of connectors in this aggregation that are currently available. + "count": 42, # Number of connectors in this aggregation. + "maxChargeRateKw": 3.14, # The static max charging rate in kw of each connector in the aggregation. + "outOfServiceCount": 42, # Number of connectors in this aggregation that are currently out of service. + "type": "A String", # The connector type of this aggregation. + }, + ], + "connectorCount": 42, # Number of connectors at this station. However, because some ports can have multiple connectors but only be able to charge one car at a time (e.g.) the number of connectors may be greater than the total number of cars which can charge simultaneously. + }, "formattedAddress": "A String", # A full, human-readable address for this place. + "fuelOptions": { # The most recent information about fuel options in a gas station. This information is updated regularly. # The most recent information about fuel options in a gas station. This information is updated regularly. + "fuelPrices": [ # The last known fuel price for each type of fuel this station has. There is one entry per fuel type this station has. Order is not important. + { # Fuel price information for a given type. + "price": { # Represents an amount of money with its currency type. # The price of the fuel. + "currencyCode": "A String", # The three-letter currency code defined in ISO 4217. + "nanos": 42, # Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. + "units": "A String", # The whole units of the amount. For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. + }, + "type": "A String", # The type of fuel. + "updateTime": "A String", # The time the fuel price was last updated. + }, + ], + }, + "goodForChildren": True or False, # Place is good for children. + "goodForGroups": True or False, # Place accommodates groups. + "goodForWatchingSports": True or False, # Place is suitable for watching sports. "googleMapsUri": "A String", # A URL providing more information about this place. "iconBackgroundColor": "A String", # Background color for icon_mask in hex format, e.g. #909CE1. 
- "iconMaskBaseUri": "A String", # A truncated URL to an v2 icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png"). + "iconMaskBaseUri": "A String", # A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, ".svg" or ".png"). "id": "A String", # The unique identifier of a place. "internationalPhoneNumber": "A String", # A human-readable phone number for the place, in international format. + "liveMusic": True or False, # Place provides live music. "location": { # An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges. # The position of this place. "latitude": 3.14, # The latitude in degrees. It must be in the range [-90.0, +90.0]. "longitude": 3.14, # The longitude in degrees. It must be in the range [-180.0, +180.0]. }, + "menuForChildren": True or False, # Place has a children's menu. "name": "A String", # An ID representing this place which may be used to look up this place again (a.k.a. the API "resource" name: places/place_id). "nationalPhoneNumber": "A String", # A human-readable phone number for the place, in national format. + "outdoorSeating": True or False, # Place provides outdoor seating. + "parkingOptions": { # Information about parking options for the place. A parking lot could support more than one option at the same time. # Options of parking provided by the place. + "freeGarageParking": True or False, # Place offers free garage parking. + "freeParkingLot": True or False, # Place offers free parking lots. + "freeStreetParking": True or False, # Place offers free street parking. + "paidGarageParking": True or False, # Place offers paid garage parking. + "paidParkingLot": True or False, # Place offers paid parking lots. + "paidStreetParking": True or False, # Place offers paid street parking. + "valetParking": True or False, # Place offers valet parking. + }, + "paymentOptions": { # Payment options the place accepts. # Payment options the place accepts. If a payment option data is not available, the payment option field will be unset. + "acceptsCashOnly": True or False, # Place accepts cash only as payment. Places with this attribute may still accept other payment methods. + "acceptsCreditCards": True or False, # Place accepts credit cards as payment. + "acceptsDebitCards": True or False, # Place accepts debit cards as payment. + "acceptsNfc": True or False, # Place accepts NFC payments. + }, + "photos": [ # Information (including references) about photos of this place. + { # Information about a photo of a place. + "authorAttributions": [ # This photo's authors. + { # Information about the author of the UGC data. Used in Photo, and Review. + "displayName": "A String", # Name of the author of the Photo or Review. + "photoUri": "A String", # Profile photo URI of the author of the Photo or Review. + "uri": "A String", # URI of the author of the Photo or Review. + }, + ], + "heightPx": 42, # The maximum available height, in pixels. + "name": "A String", # Identifier. A reference representing this place photo which may be used to look up this place photo again (a.k.a. the API "resource" name: places/{place_id}/photos/{photo}). + "widthPx": 42, # The maximum available width, in pixels. 
+ }, + ], "plusCode": { # Plus code (http://plus.codes) is a location reference with two formats: global code defining a 14mx14m (1/8000th of a degree) or smaller rectangle, and compound code, replacing the prefix with a reference location. # Plus code of the place location lat/long. "compoundCode": "A String", # Place's compound code, such as "33GV+HQ, Ramberg, Norway", containing the suffix of the global code and replacing the prefix with a formatted name of a reference entity. "globalCode": "A String", # Place's global (full) code, such as "9FWM33GV+HQ", representing an 1/8000 by 1/8000 degree area (~14 by 14 meters). }, "priceLevel": "A String", # Price level of the place. + "primaryType": "A String", # The primary type of the given result. This type must one of the Places API supported types. For example, "restaurant", "cafe", "airport", etc. A place can only have a single primary type. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types + "primaryTypeDisplayName": { # Localized variant of a text in a particular language. # The display name of the primary type, localized to the request language if applicable. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types + "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + "text": "A String", # Localized string in the language corresponding to language_code below. + }, "rating": 3.14, # A rating between 1.0 and 5.0, based on user reviews of this place. "regularOpeningHours": { # Information about business hour of the place. # The regular hours of operation. "openNow": True or False, # Is this place open right now? Always present unless we lack time-of-day or timezone data for these opening hours. @@ -374,21 +1225,23 @@

Method Details

}, ], "reservable": True or False, # Specifies if the place supports reservations. - "reviews": [ # List of reviews about this place. + "restroom": True or False, # Place has restroom. + "reviews": [ # List of reviews about this place, sorted by relevance. { # Information about a review of a place. - "authorAttribution": { # Information about the author of the UGC data. Used in Photo, and Review. # Output only. This review's author. - "displayName": "A String", # Output only. Name of the author of the Photo or Review. - "photoUri": "A String", # Output only. Profile photo URI of the author of the Photo or Review. - "uri": "A String", # Output only. URI of the author of the Photo or Review. + "authorAttribution": { # Information about the author of the UGC data. Used in Photo, and Review. # This review's author. + "displayName": "A String", # Name of the author of the Photo or Review. + "photoUri": "A String", # Profile photo URI of the author of the Photo or Review. + "uri": "A String", # URI of the author of the Photo or Review. }, - "originalText": { # Localized variant of a text in a particular language. # Output only. The review text in its original language. + "name": "A String", # A reference representing this place review which may be used to look up this place review again (also called the API "resource" name: places/place_id/reviews/review). + "originalText": { # Localized variant of a text in a particular language. # The review text in its original language. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, - "publishTime": "A String", # Output only. Timestamp for the review. - "rating": 3.14, # Output only. A number between 1.0 and 5.0, a.k.a. the number of stars. - "relativePublishTimeDescription": "A String", # Output only. A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country. - "text": { # Localized variant of a text in a particular language. # Output only. The localized text of the review. + "publishTime": "A String", # Timestamp for the review. + "rating": 3.14, # A number between 1.0 and 5.0, also called the number of stars. + "relativePublishTimeDescription": "A String", # A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country. + "text": { # Localized variant of a text in a particular language. # The localized text of the review. "languageCode": "A String", # The text's BCP-47 language code, such as "en-US" or "sr-Latn". For more information, see http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. "text": "A String", # Localized string in the language corresponding to language_code below. }, @@ -397,12 +1250,22 @@

Method Details

"servesBeer": True or False, # Specifies if the place serves beer. "servesBreakfast": True or False, # Specifies if the place serves breakfast. "servesBrunch": True or False, # Specifies if the place serves brunch. + "servesCocktails": True or False, # Place serves cocktails. + "servesCoffee": True or False, # Place serves coffee. + "servesDessert": True or False, # Place serves dessert. "servesDinner": True or False, # Specifies if the place serves dinner. "servesLunch": True or False, # Specifies if the place serves lunch. "servesVegetarianFood": True or False, # Specifies if the place serves vegetarian food. "servesWine": True or False, # Specifies if the place serves wine. + "shortFormattedAddress": "A String", # A short, human-readable address for this place. + "subDestinations": [ # A list of sub destinations related to the place. + { # Place resource name and id of sub destinations that relate to the place. For example, different terminals are different destinations of an airport. + "id": "A String", # The place id of the sub destination. + "name": "A String", # The resource name of the sub destination. + }, + ], "takeout": True or False, # Specifies if the business supports takeout. - "types": [ # A set of type tags for this result. For example, "political" and "locality". See: https://developers.google.com/maps/documentation/places/web-service/place-types + "types": [ # A set of type tags for this result. For example, "political" and "locality". For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types "A String", ], "userRatingCount": 42, # The total number of reviews (with or without text) for this place. diff --git a/docs/dyn/places_v1.places.photos.html b/docs/dyn/places_v1.places.photos.html new file mode 100644 index 00000000000..e1ac02320b0 --- /dev/null +++ b/docs/dyn/places_v1.places.photos.html @@ -0,0 +1,112 @@ + + + +

Places API (New) . places . photos

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ getMedia(name, maxHeightPx=None, maxWidthPx=None, skipHttpRedirect=None, x__xgafv=None)

+

Get a photo media with a photo reference string.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ getMedia(name, maxHeightPx=None, maxWidthPx=None, skipHttpRedirect=None, x__xgafv=None) +
Get a photo media with a photo reference string.
+
+Args:
+  name: string, Required. The resource name of a photo media in the format: `places/place_id/photos/photo_reference/media`. The resource name of a photo as returned in a Place object's photos.name field comes with the format `places/place_id/photos/photo_reference`. You need to append `/media` at the end of the photo resource to get the photo media resource name. (required)
+  maxHeightPx: integer, Optional. Specifies the maximum desired height, in pixels, of the image. If the image is smaller than the values specified, the original image will be returned. If the image is larger in either dimension, it will be scaled to match the smaller of the two dimensions, restricted to its original aspect ratio. Both the max_height_px and max_width_px properties accept an integer between 1 and 4800, inclusively. If the value is not within the allowed range, an INVALID_ARGUMENT error will be returned. At least one of max_height_px or max_width_px needs to be specified. If neither max_height_px nor max_width_px is specified, an INVALID_ARGUMENT error will be returned.
+  maxWidthPx: integer, Optional. Specifies the maximum desired width, in pixels, of the image. If the image is smaller than the values specified, the original image will be returned. If the image is larger in either dimension, it will be scaled to match the smaller of the two dimensions, restricted to its original aspect ratio. Both the max_height_px and max_width_px properties accept an integer between 1 and 4800, inclusively. If the value is not within the allowed range, an INVALID_ARGUMENT error will be returned. At least one of max_height_px or max_width_px needs to be specified. If neither max_height_px nor max_width_px is specified, an INVALID_ARGUMENT error will be returned.
+  skipHttpRedirect: boolean, Optional. If set, skip the default HTTP redirect behavior and render a text format (for example, in JSON format for the HTTP use case) response. If not set, an HTTP redirect will be issued to redirect the call to the image media. This option is ignored for non-HTTP requests.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A photo media from Places API.
+  "name": "A String", # The resource name of a photo media in the format: `places/place_id/photos/photo_reference/media`.
+  "photoUri": "A String", # A short-lived uri that can be used to render the photo.
+}
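A short sketch of calling getMedia with skipHttpRedirect set, so the PhotoMedia JSON above is returned instead of an HTTP redirect. The photo resource name and API key are placeholders; the photo name would normally come from a Place's photos[].name field with "/media" appended.

# Placeholder resource name; append "/media" to a Place's photos[].name value.
from googleapiclient.discovery import build

places = build("places", "v1", developerKey="YOUR_API_KEY")  # placeholder key

media = places.places().photos().getMedia(
    name="places/PLACE_ID/photos/PHOTO_REFERENCE/media",
    maxWidthPx=800,          # 1-4800; at least one of maxWidthPx/maxHeightPx is required
    skipHttpRedirect=True,   # return the PhotoMedia JSON instead of a redirect
).execute()

print(media["photoUri"])     # short-lived URI that renders the photo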
+
+
+
\ No newline at end of file
diff --git a/docs/dyn/pubsub_v1.projects.schemas.html b/docs/dyn/pubsub_v1.projects.schemas.html
index de59e482d32..356b947c92b 100644
--- a/docs/dyn/pubsub_v1.projects.schemas.html
+++ b/docs/dyn/pubsub_v1.projects.schemas.html
@@ -181,7 +181,7 @@

Method Details

  "type": "A String", # The type of the schema definition.
}

-  schemaId: string, The ID to use for the schema, which will become the final component of the schema's resource name. See https://cloud.google.com/pubsub/docs/admin#resource_names for resource name constraints.
+  schemaId: string, The ID to use for the schema, which will become the final component of the schema's resource name. See https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names for resource name constraints.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
diff --git a/docs/dyn/pubsub_v1.projects.snapshots.html b/docs/dyn/pubsub_v1.projects.snapshots.html
index eecf072d290..4f61dcdee81 100644
--- a/docs/dyn/pubsub_v1.projects.snapshots.html
+++ b/docs/dyn/pubsub_v1.projects.snapshots.html
@@ -79,7 +79,7 @@

Instance Methods

Close httplib2 connections.

create(name, body=None, x__xgafv=None)

-

Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.

+

Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.

delete(snapshot, x__xgafv=None)

Removes an existing snapshot. Snapshots are used in [Seek] (https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. When the snapshot is deleted, all messages retained in the snapshot are immediately dropped. After a snapshot is deleted, a new one may be created with the same name, but the new one has no association with the old snapshot or its subscription, unless the same subscription is specified.

@@ -112,10 +112,10 @@

Method Details

create(name, body=None, x__xgafv=None) -
Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.
+  
Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.
 
 Args:
-  name: string, Required. User-provided name for this snapshot. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription. Note that for REST API requests, you must specify a name. See the [resource name rules](https://cloud.google.com/pubsub/docs/admin#resource_names). Format is `projects/{project}/snapshots/{snap}`. (required)
+  name: string, Required. User-provided name for this snapshot. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription. Note that for REST API requests, you must specify a name. See the [resource name rules](https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). Format is `projects/{project}/snapshots/{snap}`. (required)
   body: object, The request body.
     The object takes the form of:
 
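For reference, a hedged sketch of the snapshot create call documented here, assuming application-default credentials with Pub/Sub permissions and an existing subscription; project and resource names are placeholders.

# Assumes application-default credentials; resource names are placeholders.
from googleapiclient.discovery import build

pubsub = build("pubsub", "v1")

snapshot = pubsub.projects().snapshots().create(
    name="projects/my-project/snapshots/my-snapshot",
    body={"subscription": "projects/my-project/subscriptions/my-subscription"},
).execute()

print(snapshot["name"], snapshot.get("expireTime"))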
diff --git a/docs/dyn/pubsub_v1.projects.subscriptions.html b/docs/dyn/pubsub_v1.projects.subscriptions.html
index 23c278a09db..e3eb9cd4aa1 100644
--- a/docs/dyn/pubsub_v1.projects.subscriptions.html
+++ b/docs/dyn/pubsub_v1.projects.subscriptions.html
@@ -82,7 +82,7 @@ 

Instance Methods

Close httplib2 connections.

create(name, body=None, x__xgafv=None)

-

Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.

+

Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.

delete(subscription, x__xgafv=None)

Deletes an existing subscription. All messages retained in the subscription are immediately dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted, a new one may be created with the same name, but the new one has no association with the old subscription or its topic unless the same topic is specified.

@@ -157,7 +157,7 @@

Method Details

create(name, body=None, x__xgafv=None) -
Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.
+  
Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.
 
 Args:
   name: string, Required. The name of the subscription. It must have the format `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `"goog"`. (required)
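A minimal sketch of creating a subscription with a name that satisfies the constraints above, assuming the topic already exists and application-default credentials are configured; resource names are placeholders.

# Assumes the topic already exists; resource names are placeholders.
from googleapiclient.discovery import build

pubsub = build("pubsub", "v1")

subscription = pubsub.projects().subscriptions().create(
    name="projects/my-project/subscriptions/my-subscription",
    body={
        "topic": "projects/my-project/topics/my-topic",
        "ackDeadlineSeconds": 30,   # optional; the server default applies when omitted
    },
).execute()

print(subscription["name"])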
diff --git a/docs/dyn/pubsub_v1.projects.topics.html b/docs/dyn/pubsub_v1.projects.topics.html
index b2e3ec03e68..bb8690f9c99 100644
--- a/docs/dyn/pubsub_v1.projects.topics.html
+++ b/docs/dyn/pubsub_v1.projects.topics.html
@@ -89,7 +89,7 @@ 

Instance Methods

Close httplib2 connections.

create(name, body=None, x__xgafv=None)

-

Creates the given topic with the given name. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names).

+

Creates the given topic with the given name. See the [resource name rules] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).

delete(topic, x__xgafv=None)

Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a topic is deleted, a new topic may be created with the same name; this is an entirely new topic with none of the old configuration or subscriptions. Existing subscriptions to this topic are not deleted, but their `topic` field is set to `_deleted-topic_`.

@@ -125,7 +125,7 @@

Method Details

create(name, body=None, x__xgafv=None) -
Creates the given topic with the given name. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names).
+  
Creates the given topic with the given name. See the [resource name rules] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).
 
 Args:
   name: string, Required. The name of the topic. It must have the format `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in length, and it must not start with `"goog"`. (required)
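A hedged sketch of creating a topic that pins message storage to specific regions via the allowedPersistenceRegions field discussed in the hunks below, assuming application-default credentials and that the listed region IDs are valid for the project; names are placeholders.

# Assumes application-default credentials; project, topic, and region IDs are placeholders.
from googleapiclient.discovery import build

pubsub = build("pubsub", "v1")

topic = pubsub.projects().topics().create(
    name="projects/my-project/topics/my-topic",
    body={
        "messageStoragePolicy": {
            "allowedPersistenceRegions": ["us-central1", "europe-west1"],
        },
    },
).execute()

print(topic["name"])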
@@ -139,7 +139,7 @@ 

Method Details

}, "messageRetentionDuration": "A String", # Optional. Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes. "messageStoragePolicy": { # A policy constraining the storage of messages published to the topic. # Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect. - "allowedPersistenceRegions": [ # Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. + "allowedPersistenceRegions": [ # Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. "A String", ], }, @@ -168,7 +168,7 @@

Method Details

}, "messageRetentionDuration": "A String", # Optional. Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes. "messageStoragePolicy": { # A policy constraining the storage of messages published to the topic. # Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect. - "allowedPersistenceRegions": [ # Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. + "allowedPersistenceRegions": [ # Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. "A String", ], }, @@ -222,7 +222,7 @@

Method Details

}, "messageRetentionDuration": "A String", # Optional. Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes. "messageStoragePolicy": { # A policy constraining the storage of messages published to the topic. # Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect. - "allowedPersistenceRegions": [ # Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. + "allowedPersistenceRegions": [ # Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. "A String", ], }, @@ -298,7 +298,7 @@

Method Details

}, "messageRetentionDuration": "A String", # Optional. Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes. "messageStoragePolicy": { # A policy constraining the storage of messages published to the topic. # Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect. - "allowedPersistenceRegions": [ # Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. + "allowedPersistenceRegions": [ # Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. "A String", ], }, @@ -346,7 +346,7 @@

Method Details

}, "messageRetentionDuration": "A String", # Optional. Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes. "messageStoragePolicy": { # A policy constraining the storage of messages published to the topic. # Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect. - "allowedPersistenceRegions": [ # Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. + "allowedPersistenceRegions": [ # Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. "A String", ], }, @@ -377,7 +377,7 @@

Method Details

}, "messageRetentionDuration": "A String", # Optional. Indicates the minimum duration to retain a message after it is published to the topic. If this field is set, messages published to the topic in the last `message_retention_duration` are always available to subscribers. For instance, it allows any attached subscription to [seek to a timestamp](https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time) that is up to `message_retention_duration` in the past. If this field is not set, message retention is controlled by settings on individual subscriptions. Cannot be more than 31 days or less than 10 minutes. "messageStoragePolicy": { # A policy constraining the storage of messages published to the topic. # Optional. Policy constraining the set of Google Cloud Platform regions where messages published to the topic may be stored. If not present, then no constraints are in effect. - "allowedPersistenceRegions": [ # Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. + "allowedPersistenceRegions": [ # Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration. "A String", ], }, diff --git a/docs/dyn/recaptchaenterprise_v1.projects.assessments.html b/docs/dyn/recaptchaenterprise_v1.projects.assessments.html index a718a3568b6..b5bebe96a30 100644 --- a/docs/dyn/recaptchaenterprise_v1.projects.assessments.html +++ b/docs/dyn/recaptchaenterprise_v1.projects.assessments.html @@ -94,8 +94,9 @@

Method Details

The object takes the form of: { # The request message to annotate an Assessment. + "accountId": "A String", # Optional. A stable account identifier to apply to the assessment. This is an alternative to setting `account_id` in `CreateAssessment`, for example when a stable account identifier is not yet known in the initial request. "annotation": "A String", # Optional. The annotation that will be assigned to the Event. This field can be left empty to provide reasons that apply to an event without concluding whether the event is legitimate or fraudulent. - "hashedAccountId": "A String", # Optional. Unique stable hashed user identifier to apply to the assessment. This is an alternative to setting the hashed_account_id in CreateAssessment, for example when the account identifier is not yet known in the initial request. It is recommended that the identifier is hashed using hmac-sha256 with stable secret. + "hashedAccountId": "A String", # Optional. A stable hashed account identifier to apply to the assessment. This is an alternative to setting `hashed_account_id` in `CreateAssessment`, for example when a stable account identifier is not yet known in the initial request. "reasons": [ # Optional. Reasons for the annotation that are assigned to the event. "A String", ], @@ -134,7 +135,7 @@
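For the new `accountId` field on the annotate request, a hedged sketch of how it might be passed through the generated `recaptchaenterprise` v1 client; the assessment name, account ID, and annotation value are illustrative only:

```python
# Sketch: annotate an existing assessment, supplying the stable account
# identifier after the fact (an alternative to hashed_account_id).
from googleapiclient.discovery import build

recaptcha = build("recaptchaenterprise", "v1")

response = (
    recaptcha.projects()
    .assessments()
    .annotate(
        name="projects/my-project/assessments/assessment-id",  # placeholder
        body={
            "annotation": "LEGITIMATE",
            "accountId": "user-123",  # field added in this update
        },
    )
    .execute()
)
```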

Method Details

The object takes the form of: { # A reCAPTCHA Enterprise assessment resource. - "accountDefenderAssessment": { # Account defender risk assessment. # Output only. Assessment returned by account defender when a hashed_account_id is provided. + "accountDefenderAssessment": { # Account defender risk assessment. # Output only. Assessment returned by account defender when an account identifier is provided. "labels": [ # Output only. Labels for this request. "A String", ], @@ -156,7 +157,7 @@

Method Details

"expectedAction": "A String", # Optional. The expected action for this type of event. This should be the same action provided at token generation time on client-side platforms already integrated with recaptcha enterprise. "express": True or False, # Optional. Flag for a reCAPTCHA express request for an assessment without a token. If enabled, `site_key` must reference a SCORE key with WAF feature set to EXPRESS. "firewallPolicyEvaluation": True or False, # Optional. Flag for enabling firewall policy config assessment. If this flag is enabled, the firewall policy will be evaluated and a suggested firewall action will be returned in the response. - "hashedAccountId": "A String", # Optional. Unique stable hashed user identifier for the request. The identifier must be hashed using hmac-sha256 with stable secret. + "hashedAccountId": "A String", # Optional. Deprecated: use `user_info.account_id` instead. Unique stable hashed user identifier for the request. The identifier must be hashed using hmac-sha256 with stable secret. "headers": [ # Optional. HTTP header information about the request. "A String", ], @@ -226,6 +227,17 @@

Method Details

"value": 3.14, # Optional. The decimal value of the transaction in the specified currency. }, "userAgent": "A String", # Optional. The user agent present in the request from the user's device related to this event. + "userInfo": { # User information associated with a request protected by reCAPTCHA Enterprise. # Optional. Information about the user that generates this event, when they can be identified. They are often identified through the use of an account for logged-in requests or login/registration requests, or by providing user identifiers for guest actions like checkout. + "accountId": "A String", # Optional. For logged-in requests or login/registration requests, the unique account identifier associated with this user. You can use the username if it is stable (meaning it is the same for every request associated with the same user), or any stable user ID of your choice. Leave blank for non logged-in actions or guest checkout. + "createAccountTime": "A String", # Optional. Creation time for this account associated with this user. Leave blank for non logged-in actions, guest checkout, or when there is no account associated with the current user. + "userIds": [ # Optional. Identifiers associated with this user or request. + { # An identifier associated with a user. + "email": "A String", # Optional. An email address. + "phoneNumber": "A String", # Optional. A phone number. Should use the E.164 format. + "username": "A String", # Optional. A unique username, if different from all the other identifiers and `account_id` that are provided. Can be a unique login handle or display name for a user. + }, + ], + }, "userIpAddress": "A String", # Optional. The IP address in the request from the user's device related to this event. "wafTokenAssessment": True or False, # Optional. Flag for running WAF token assessment. If enabled, the token must be specified, and have been created by a WAF-enabled key. }, @@ -324,7 +336,7 @@

Method Details

An object of the form: { # A reCAPTCHA Enterprise assessment resource. - "accountDefenderAssessment": { # Account defender risk assessment. # Output only. Assessment returned by account defender when a hashed_account_id is provided. + "accountDefenderAssessment": { # Account defender risk assessment. # Output only. Assessment returned by account defender when an account identifier is provided. "labels": [ # Output only. Labels for this request. "A String", ], @@ -346,7 +358,7 @@

Method Details

"expectedAction": "A String", # Optional. The expected action for this type of event. This should be the same action provided at token generation time on client-side platforms already integrated with recaptcha enterprise. "express": True or False, # Optional. Flag for a reCAPTCHA express request for an assessment without a token. If enabled, `site_key` must reference a SCORE key with WAF feature set to EXPRESS. "firewallPolicyEvaluation": True or False, # Optional. Flag for enabling firewall policy config assessment. If this flag is enabled, the firewall policy will be evaluated and a suggested firewall action will be returned in the response. - "hashedAccountId": "A String", # Optional. Unique stable hashed user identifier for the request. The identifier must be hashed using hmac-sha256 with stable secret. + "hashedAccountId": "A String", # Optional. Deprecated: use `user_info.account_id` instead. Unique stable hashed user identifier for the request. The identifier must be hashed using hmac-sha256 with stable secret. "headers": [ # Optional. HTTP header information about the request. "A String", ], @@ -416,6 +428,17 @@

Method Details

"value": 3.14, # Optional. The decimal value of the transaction in the specified currency. }, "userAgent": "A String", # Optional. The user agent present in the request from the user's device related to this event. + "userInfo": { # User information associated with a request protected by reCAPTCHA Enterprise. # Optional. Information about the user that generates this event, when they can be identified. They are often identified through the use of an account for logged-in requests or login/registration requests, or by providing user identifiers for guest actions like checkout. + "accountId": "A String", # Optional. For logged-in requests or login/registration requests, the unique account identifier associated with this user. You can use the username if it is stable (meaning it is the same for every request associated with the same user), or any stable user ID of your choice. Leave blank for non logged-in actions or guest checkout. + "createAccountTime": "A String", # Optional. Creation time for this account associated with this user. Leave blank for non logged-in actions, guest checkout, or when there is no account associated with the current user. + "userIds": [ # Optional. Identifiers associated with this user or request. + { # An identifier associated with a user. + "email": "A String", # Optional. An email address. + "phoneNumber": "A String", # Optional. A phone number. Should use the E.164 format. + "username": "A String", # Optional. A unique username, if different from all the other identifiers and `account_id` that are provided. Can be a unique login handle or display name for a user. + }, + ], + }, "userIpAddress": "A String", # Optional. The IP address in the request from the user's device related to this event. "wafTokenAssessment": True or False, # Optional. Flag for running WAF token assessment. If enabled, the token must be specified, and have been created by a WAF-enabled key. }, diff --git a/docs/dyn/run_v1.namespaces.configurations.html b/docs/dyn/run_v1.namespaces.configurations.html index a42dd00436a..a36300ac2f2 100644 --- a/docs/dyn/run_v1.namespaces.configurations.html +++ b/docs/dyn/run_v1.namespaces.configurations.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -141,7 +141,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -445,7 +445,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -479,7 +479,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.domainmappings.html b/docs/dyn/run_v1.namespaces.domainmappings.html index 8e99b750916..d3fedfc7344 100644 --- a/docs/dyn/run_v1.namespaces.domainmappings.html +++ b/docs/dyn/run_v1.namespaces.domainmappings.html @@ -108,7 +108,7 @@
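The Cloud Run hunks above only extend the documented list of restricted annotations (adding `run.googleapis.com/minScale`: Service (ALPHA)). As a usage sketch, reading a Configuration's revision-template annotations with the generated `run` v1 client; the regional endpoint, project, and configuration name are assumptions, since the `namespaces.*` surface is served from per-region endpoints:

```python
# Sketch: fetch a Cloud Run Configuration and inspect the restricted
# annotations on its revision template.
from googleapiclient.discovery import build

run = build(
    "run",
    "v1",
    client_options={"api_endpoint": "https://us-central1-run.googleapis.com"},
)

config = (
    run.namespaces()
    .configurations()
    .get(name="namespaces/my-project/configurations/my-service")  # placeholder
    .execute()
)

template_meta = config.get("spec", {}).get("template", {}).get("metadata", {})
annotations = template_meta.get("annotations", {})
print(annotations.get("autoscaling.knative.dev/minScale"))
print(annotations.get("autoscaling.knative.dev/maxScale"))
```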

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -181,7 +181,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -305,7 +305,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -394,7 +394,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.executions.html b/docs/dyn/run_v1.namespaces.executions.html index 99125815a12..7146038b64e 100644 --- a/docs/dyn/run_v1.namespaces.executions.html +++ b/docs/dyn/run_v1.namespaces.executions.html @@ -114,7 +114,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -461,7 +461,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -769,7 +769,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.jobs.html b/docs/dyn/run_v1.namespaces.jobs.html index b00304c8c24..ba97b4dfad8 100644 --- a/docs/dyn/run_v1.namespaces.jobs.html +++ b/docs/dyn/run_v1.namespaces.jobs.html @@ -114,7 +114,7 @@
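The hunks that follow in run_v1.namespaces.jobs.html all make the same documentation change: the restricted-annotation list gains `run.googleapis.com/minScale`: Service (ALPHA), a Service-level minimum-instances key that sits alongside the existing per-Revision `autoscaling.knative.dev/minScale`. A minimal sketch of a Service body carrying both, following the dict shapes shown in these generated docs; the service name, project, and numeric values are placeholders, not part of the diff:

# Sketch only: Service-level vs. revision-template annotations.
service_body = {
    "apiVersion": "serving.knative.dev/v1",
    "kind": "Service",
    "metadata": {
        "name": "my-service",  # placeholder
        "annotations": {
            # Service-level minimum instance count (ALPHA per this change).
            "run.googleapis.com/minScale": "1",
            "run.googleapis.com/ingress": "all",
        },
    },
    "spec": {
        "template": {
            "metadata": {
                "annotations": {
                    # Existing per-revision autoscaling bounds.
                    "autoscaling.knative.dev/minScale": "1",
                    "autoscaling.knative.dev/maxScale": "10",
                },
            },
        },
    },
}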

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -148,7 +148,7 @@

Method Details

"spec": { # JobSpec describes how the job will look. # Optional. Specification of the desired behavior of a job. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -439,7 +439,7 @@
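As the ExecutionTemplateSpec description above notes, a few annotation keys on spec.template.metadata configure the Execution that a Job creates: `run.googleapis.com/cloudsql-instances` takes comma-separated connection names, `run.googleapis.com/vpc-access-connector` names a Serverless VPC Access connector, and `run.googleapis.com/vpc-access-egress` accepts `all-traffic` (preferred), the deprecated `all`, or `private-ranges-only`. A hedged sketch of creating such a Job with this client; the method path follows docs/dyn/run_v1.namespaces.jobs.html, and every name, image, and connection string is a placeholder:

from googleapiclient.discovery import build

# Sketch: build the run v1 client and create a Job whose executions get
# Cloud SQL and Serverless VPC Access settings via annotations.
# Note: namespaces.* calls generally need a regional endpoint, e.g.
# build("run", "v1", client_options={"api_endpoint": "https://us-central1-run.googleapis.com"})
# -- treat that endpoint as an assumption and adjust for your region.
run = build("run", "v1")

job_body = {
    "apiVersion": "run.googleapis.com/v1",
    "kind": "Job",
    "metadata": {"name": "nightly-import"},  # placeholder name
    "spec": {
        "template": {  # ExecutionTemplateSpec
            "metadata": {
                "annotations": {
                    # Comma-separated Cloud SQL connection names.
                    "run.googleapis.com/cloudsql-instances": "proj:region:instance-a,proj:region:instance-b",
                    # Serverless VPC Access connector and egress setting.
                    "run.googleapis.com/vpc-access-connector": "my-connector",
                    "run.googleapis.com/vpc-access-egress": "private-ranges-only",
                },
            },
            "spec": {  # ExecutionSpec
                "template": {  # TaskTemplateSpec
                    "spec": {
                        "containers": [{"image": "gcr.io/my-project/import:latest"}],
                    },
                },
            },
        },
    },
}

request = run.namespaces().jobs().create(parent="namespaces/my-project", body=job_body)
response = request.execute()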

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -473,7 +473,7 @@

Method Details

"spec": { # JobSpec describes how the job will look. # Optional. Specification of the desired behavior of a job. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -815,7 +815,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -849,7 +849,7 @@

Method Details

"spec": { # JobSpec describes how the job will look. # Optional. Specification of the desired behavior of a job. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1157,7 +1157,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1191,7 +1191,7 @@
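Keys under the 'run.googleapis.com/' and 'autoscaling.knative.dev' prefixes are restricted, and each key is accepted only on the resource types listed in the description above. A purely illustrative, hypothetical lookup built by transcribing a few of those entries can catch mismatches before a request is sent; nothing here is an official client feature:

# Hypothetical helper: which resource kinds accept a given restricted annotation,
# transcribed from the description above (partial, illustrative only).
ACCEPTED_ON = {
    "autoscaling.knative.dev/maxScale": {"Revision"},
    "autoscaling.knative.dev/minScale": {"Revision"},
    "run.googleapis.com/minScale": {"Service"},  # ALPHA
    "run.googleapis.com/launch-stage": {"Service", "Job"},
    "run.googleapis.com/cloudsql-instances": {"Revision", "Execution"},
    "run.googleapis.com/vpc-access-egress": {"Revision", "Execution"},
    # "All resources" in the docs is read here as all four kinds.
    "run.googleapis.com/client-name": {"Service", "Job", "Revision", "Execution"},
}

def check_annotations(kind: str, annotations: dict) -> list:
    """Return restricted keys that the docs do not list for this resource kind."""
    problems = []
    for key in annotations:
        restricted = key.startswith("run.googleapis.com/") or key.startswith("autoscaling.knative.dev/")
        if restricted and kind not in ACCEPTED_ON.get(key, set()):
            problems.append(key)
    return problems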

Method Details

"spec": { # JobSpec describes how the job will look. # Optional. Specification of the desired behavior of a job. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1495,7 +1495,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1529,7 +1529,7 @@

Method Details

"spec": { # JobSpec describes how the job will look. # Optional. Specification of the desired behavior of a job. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1820,7 +1820,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1854,7 +1854,7 @@

Method Details

"spec": { # JobSpec describes how the job will look. # Optional. Specification of the desired behavior of a job. "template": { # ExecutionTemplateSpec describes the metadata and spec an Execution should have when created from a job. # Optional. Describes the execution that will be created when running a job. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Optional metadata for this Execution, including labels and annotations. The following annotation keys set properties of the created execution: * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. 
* `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2195,7 +2195,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.revisions.html b/docs/dyn/run_v1.namespaces.revisions.html index 5228023cd16..094707cec55 100644 --- a/docs/dyn/run_v1.namespaces.revisions.html +++ b/docs/dyn/run_v1.namespaces.revisions.html @@ -155,7 +155,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -459,7 +459,7 @@
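The annotation map documented in the hunks above is surfaced to callers as the plain `metadata.annotations` dict on each returned resource. For orientation, a minimal sketch of reading those annotations through the generated run v1 client, assuming Application Default Credentials, a regional API endpoint (us-central1 here), and a placeholder project ID:

```python
# Sketch only: list Revisions and read their autoscaling annotations.
# The endpoint, project ID, and annotation values are placeholders.
from googleapiclient.discovery import build

run = build(
    "run", "v1",
    client_options={"api_endpoint": "https://us-central1-run.googleapis.com"},
)

resp = run.namespaces().revisions().list(
    parent="namespaces/my-project"  # placeholder project ID
).execute()

for rev in resp.get("items", []):
    annotations = rev["metadata"].get("annotations", {})
    print(
        rev["metadata"]["name"],
        annotations.get("autoscaling.knative.dev/minScale"),
        annotations.get("autoscaling.knative.dev/maxScale"),
    )
```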

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.routes.html b/docs/dyn/run_v1.namespaces.routes.html index 7205d5a51e5..ebac2457354 100644 --- a/docs/dyn/run_v1.namespaces.routes.html +++ b/docs/dyn/run_v1.namespaces.routes.html @@ -107,7 +107,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -208,7 +208,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v1.namespaces.services.html b/docs/dyn/run_v1.namespaces.services.html index dd11fe3ca5e..4cc3eb85132 100644 --- a/docs/dyn/run_v1.namespaces.services.html +++ b/docs/dyn/run_v1.namespaces.services.html @@ -111,7 +111,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -145,7 +145,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -457,7 +457,7 @@
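The revision-template keys called out in the hunk above (`autoscaling.knative.dev/minScale`/`maxScale`, `run.googleapis.com/cloudsql-instances`, `run.googleapis.com/vpc-access-egress`) are set under `spec.template.metadata.annotations` rather than on the Service metadata. A sketch with placeholder resource names and illustrative values:

```python
# Sketch only: set revision-template annotations described in the field
# documentation above. Project, service, and instance names are placeholders.
from googleapiclient.discovery import build

run = build(
    "run", "v1",
    client_options={"api_endpoint": "https://us-central1-run.googleapis.com"},
)

name = "namespaces/my-project/services/my-service"  # placeholder
svc = run.namespaces().services().get(name=name).execute()

tmpl_meta = svc["spec"]["template"].setdefault("metadata", {})
tmpl_meta.setdefault("annotations", {}).update({
    "autoscaling.knative.dev/minScale": "1",
    "autoscaling.knative.dev/maxScale": "10",
    # Multiple Cloud SQL connections are comma separated, per the docs above.
    "run.googleapis.com/cloudsql-instances": "my-project:us-central1:db-1",
    # Prefer `all-traffic` over the deprecated `all`; egress settings are
    # normally paired with a VPC connector or Direct VPC configuration.
    "run.googleapis.com/vpc-access-egress": "private-ranges-only",
})

run.namespaces().services().replaceService(name=name, body=svc).execute()
```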

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -491,7 +491,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -854,7 +854,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -888,7 +888,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1216,7 +1216,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1250,7 +1250,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1574,7 +1574,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1608,7 +1608,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1920,7 +1920,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1954,7 +1954,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
"a_key": "A String",
},
"clusterName": "A String", # Not supported by Cloud Run
diff --git a/docs/dyn/run_v1.namespaces.tasks.html b/docs/dyn/run_v1.namespaces.tasks.html
index 78411bcfa67..77def4e8bc5 100644
--- a/docs/dyn/run_v1.namespaces.tasks.html
+++ b/docs/dyn/run_v1.namespaces.tasks.html
@@ -107,7 +107,7 @@
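The Service-related hunks above also repeat the revision-level annotation keys that belong under `spec.template.metadata.annotations` (for example `autoscaling.knative.dev/minScale`, `autoscaling.knative.dev/maxScale`, `run.googleapis.com/cloudsql-instances`, `run.googleapis.com/vpc-access-egress`). A hedged read-modify-write sketch with google-api-python-client, using placeholder project, region, service, and Cloud SQL instance names:

    from googleapiclient.discovery import build

    PROJECT = "my-project"    # placeholder
    REGION = "us-central1"    # placeholder
    SERVICE = "my-service"    # placeholder

    run = build(
        "run", "v1",
        client_options={"api_endpoint": f"https://{REGION}-run.googleapis.com"},
    )

    name = f"namespaces/{PROJECT}/services/{SERVICE}"
    svc = run.namespaces().services().get(name=name).execute()

    # Revision-level keys go on the revision template, not on Service.metadata.
    template_meta = svc["spec"]["template"].setdefault("metadata", {})
    template_meta.setdefault("annotations", {}).update({
        "autoscaling.knative.dev/minScale": "1",
        "autoscaling.knative.dev/maxScale": "5",
        # Comma-separated Cloud SQL connection names, per the RevisionTemplateSpec note above.
        "run.googleapis.com/cloudsql-instances": f"{PROJECT}:{REGION}:my-sql-instance",
        "run.googleapis.com/vpc-access-egress": "private-ranges-only",
    })

    run.namespaces().services().replaceService(name=name, body=svc).execute()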

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -418,7 +418,7 @@

Method Details

"apiVersion": "A String", # Optional. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. "kind": "A String", # Optional. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional. Standard object's metadata. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. 
* `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
"a_key": "A String",
},
"clusterName": "A String", # Not supported by Cloud Run
diff --git a/docs/dyn/run_v1.projects.locations.configurations.html b/docs/dyn/run_v1.projects.locations.configurations.html
index 92fde2a9fb5..9f9cdb3cf56 100644
--- a/docs/dyn/run_v1.projects.locations.configurations.html
+++ b/docs/dyn/run_v1.projects.locations.configurations.html
@@ -107,7 +107,7 @@
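The run_v1.namespaces.tasks.html hunks above carry the same annotation wording into Task metadata, which is output only for Cloud Run jobs. A small sketch of listing tasks and reading the execution-related annotations; project and region are placeholders, and the printed keys may simply be absent for a given task:

    from googleapiclient.discovery import build

    PROJECT = "my-project"    # placeholder
    REGION = "us-central1"    # placeholder

    run = build(
        "run", "v1",
        client_options={"api_endpoint": f"https://{REGION}-run.googleapis.com"},
    )

    resp = run.namespaces().tasks().list(parent=f"namespaces/{PROJECT}").execute()
    for task in resp.get("items", []):
        meta = task.get("metadata", {})
        annotations = meta.get("annotations", {})
        print(
            meta.get("name"),
            annotations.get("run.googleapis.com/execution-environment"),
            annotations.get("run.googleapis.com/encryption-key"),
        )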

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -141,7 +141,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -445,7 +445,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of resource, in this case always "Configuration". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Configuration, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -479,7 +479,7 @@

Method Details

"spec": { # ConfigurationSpec holds the desired state of the Configuration (from the client). # Spec holds the desired state of the Configuration (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Template holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
"a_key": "A String",
},
"clusterName": "A String", # Not supported by Cloud Run
diff --git a/docs/dyn/run_v1.projects.locations.domainmappings.html b/docs/dyn/run_v1.projects.locations.domainmappings.html
index 5ace77a5899..0faff9a7008 100644
--- a/docs/dyn/run_v1.projects.locations.domainmappings.html
+++ b/docs/dyn/run_v1.projects.locations.domainmappings.html
@@ -108,7 +108,7 @@
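The run_v1.projects.locations.configurations.html hunks above show the same annotation list on Configuration metadata and on its revision template. A sketch of fetching a Configuration and reading the scaling annotations from `spec.template.metadata.annotations`; all resource names are placeholders:

    from googleapiclient.discovery import build

    PROJECT = "my-project"    # placeholder
    REGION = "us-central1"    # placeholder
    CONFIG = "my-service"     # a Configuration shares the name of the owning Service

    run = build(
        "run", "v1",
        client_options={"api_endpoint": f"https://{REGION}-run.googleapis.com"},
    )

    name = f"projects/{PROJECT}/locations/{REGION}/configurations/{CONFIG}"
    cfg = run.projects().locations().configurations().get(name=name).execute()

    template_ann = cfg["spec"]["template"].get("metadata", {}).get("annotations", {})
    print("min instances:", template_ann.get("autoscaling.knative.dev/minScale", "0"))
    print("max instances:", template_ann.get("autoscaling.knative.dev/maxScale", "unset"))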

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -181,7 +181,7 @@

Method Details

"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
@@ -305,7 +305,7 @@ Method Details
"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
@@ -394,7 +394,7 @@ Method Details
"apiVersion": "A String", # The API version for this call such as "domains.cloudrun.com/v1". "kind": "A String", # The kind of resource, in this case "DomainMapping". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this BuildTemplate. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
diff --git a/docs/dyn/run_v1.projects.locations.revisions.html b/docs/dyn/run_v1.projects.locations.revisions.html
index 55d12479ad8..2c7a34cb003 100644
--- a/docs/dyn/run_v1.projects.locations.revisions.html
+++ b/docs/dyn/run_v1.projects.locations.revisions.html
@@ -155,7 +155,7 @@ Method Details
"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
@@ -459,7 +459,7 @@ Method Details
"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case "Revision". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Revision, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
diff --git a/docs/dyn/run_v1.projects.locations.routes.html b/docs/dyn/run_v1.projects.locations.routes.html
index 1c63bed09de..fc44ced87ba 100644
--- a/docs/dyn/run_v1.projects.locations.routes.html
+++ b/docs/dyn/run_v1.projects.locations.routes.html
@@ -107,7 +107,7 @@ Method Details
"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
@@ -208,7 +208,7 @@ Method Details
"apiVersion": "A String", # The API version for this call such as "serving.knative.dev/v1". "kind": "A String", # The kind of this resource, in this case always "Route". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Route, including name, namespace, labels, and annotations. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. 
* `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.
 "a_key": "A String",
 },
 "clusterName": "A String", # Not supported by Cloud Run
diff --git a/docs/dyn/run_v1.projects.locations.services.html b/docs/dyn/run_v1.projects.locations.services.html
index 119022b0ca4..05c90f39447 100644
--- a/docs/dyn/run_v1.projects.locations.services.html
+++ b/docs/dyn/run_v1.projects.locations.services.html
@@ -120,7 +120,7 @@ Method Details
"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -154,7 +154,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -466,7 +466,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -500,7 +500,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -863,7 +863,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -897,7 +897,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1273,7 +1273,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1307,7 +1307,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1631,7 +1631,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1665,7 +1665,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -1977,7 +1977,7 @@

Method Details

"apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/description` * `run.googleapis.com/disable-default-url` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. 
+ "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run @@ -2011,7 +2011,7 @@

Method Details

"spec": { # ServiceSpec holds the desired state of the Route (from the client), which is used to manipulate the underlying Route and Configuration(s). # Holds the desired state of the Service (from the client). "template": { # RevisionTemplateSpec describes the data a revision should have when created from a template. # Holds the latest specification for the Revision to be stamped out. "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Optional metadata for this Revision, including labels and annotations. Name will be generated by the Configuration. The following annotation keys set properties of the created revision: * `autoscaling.knative.dev/minScale` sets the minimum number of instances. * `autoscaling.knative.dev/maxScale` sets the maximum number of instances. * `run.googleapis.com/cloudsql-instances` sets Cloud SQL connections. Multiple values should be comma separated. * `run.googleapis.com/vpc-access-connector` sets a Serverless VPC Access connector. * `run.googleapis.com/vpc-access-egress` sets VPC egress. Supported values are `all-traffic`, `all` (deprecated), and `private-ranges-only`. `all-traffic` and `all` provide the same functionality. `all` is deprecated but will continue to be supported. Prefer `all-traffic`. - "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. + "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. 
* `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, "clusterName": "A String", # Not supported by Cloud Run diff --git a/docs/dyn/run_v2.projects.locations.services.html b/docs/dyn/run_v2.projects.locations.services.html index 3dc670111eb..bc7601c6202 100644 --- a/docs/dyn/run_v2.projects.locations.services.html +++ b/docs/dyn/run_v2.projects.locations.services.html @@ -168,6 +168,9 @@
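The run v1 hunks above add `run.googleapis.com/minScale` to the accepted Service-level annotations (ALPHA). A minimal sketch of setting it with the discovery-based Python client follows; the region, project, and service names are placeholders, and Application Default Credentials are assumed.

```python
from googleapiclient.discovery import build

# The Knative-style v1 surface is served from regional endpoints.
run_v1 = build(
    "run", "v1",
    client_options={"api_endpoint": "https://us-central1-run.googleapis.com"},
)

name = "namespaces/my-project/services/my-service"  # placeholder project/service
service = run_v1.namespaces().services().get(name=name).execute()

# Service-level minimum instance count (ALPHA annotation added above); this is
# distinct from the per-revision autoscaling.knative.dev/minScale annotation.
annotations = service.setdefault("metadata", {}).setdefault("annotations", {})
annotations["run.googleapis.com/minScale"] = "2"

updated = run_v1.namespaces().services().replaceService(name=name, body=service).execute()
print(updated["metadata"]["annotations"].get("run.googleapis.com/minScale"))
```

As with the other Knative-style annotations, the value is passed as a string; the per-revision `autoscaling.knative.dev/minScale` key on `spec.template.metadata` keeps its existing meaning.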

Method Details

"observedGeneration": "A String", # Output only. The generation of this Service currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "scaling": { # Scaling settings that apply to the service as a whole rather than the individual revision. # Optional. Specifies service-level scaling settings + "minInstanceCount": 42, # total min instances for the service. This number of instances will be divide among all revisions with specified traffic based on the percent of traffic they are receiving. (ALPHA) + }, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. "annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", @@ -476,6 +479,9 @@

Method Details

"observedGeneration": "A String", # Output only. The generation of this Service currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "scaling": { # Scaling settings that apply to the service as a whole rather than the individual revision. # Optional. Specifies service-level scaling settings + "minInstanceCount": 42, # total min instances for the service. This number of instances will be divide among all revisions with specified traffic based on the percent of traffic they are receiving. (ALPHA) + }, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. "annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", @@ -771,6 +777,9 @@

Method Details

"observedGeneration": "A String", # Output only. The generation of this Service currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "scaling": { # Scaling settings that apply to the service as a whole rather than the individual revision. # Optional. Specifies service-level scaling settings + "minInstanceCount": 42, # total min instances for the service. This number of instances will be divide among all revisions with specified traffic based on the percent of traffic they are receiving. (ALPHA) + }, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. "annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", @@ -1023,6 +1032,9 @@

Method Details

"observedGeneration": "A String", # Output only. The generation of this Service currently serving traffic. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a `string` instead of an `integer`. "reconciling": True or False, # Output only. Returns true if the Service is currently being acted upon by the system to bring it into the desired state. When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, `observed_generation`, `latest_ready_revison`, `traffic_statuses`, and `uri` will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in `terminal_condition.state`. If reconciliation succeeded, the following fields will match: `traffic` and `traffic_statuses`, `observed_generation` and `generation`, `latest_ready_revision` and `latest_created_revision`. If reconciliation failed, `traffic_statuses`, `observed_generation`, and `latest_ready_revision` will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in `terminal_condition` and `conditions`. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "scaling": { # Scaling settings that apply to the service as a whole rather than the individual revision. # Optional. Specifies service-level scaling settings + "minInstanceCount": 42, # total min instances for the service. This number of instances will be divide among all revisions with specified traffic based on the percent of traffic they are receiving. (ALPHA) + }, "template": { # RevisionTemplate describes the data a revision should have when created from a template. # Required. The template used to create revisions for this Service. "annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", diff --git a/docs/dyn/servicenetworking_v1.services.html b/docs/dyn/servicenetworking_v1.services.html index 5c2c660712b..e6017a0d929 100644 --- a/docs/dyn/servicenetworking_v1.services.html +++ b/docs/dyn/servicenetworking_v1.services.html @@ -134,6 +134,7 @@

Method Details

"consumer": "A String", # Required. A resource that represents the service consumer, such as `projects/123456`. The project number can be different from the value in the consumer network parameter. For example, the network might be part of a Shared VPC network. In those cases, Service Networking validates that this resource belongs to that Shared VPC. "consumerNetwork": "A String", # Required. The name of the service consumer's VPC network. The network must have an existing private connection that was provisioned through the connections.create method. The name must be in the following format: `projects/{project}/global/networks/{network}`, where {project} is a project number, such as `12345`. {network} is the name of a VPC network in the project. "description": "A String", # Optional. Description of the subnet. + "internalRange": "A String", # Optional. The url of an Internal Range. Eg: `projects//locations/global/internalRanges/`. If specified, it means that the subnetwork cidr will be created using the combination of requested_address/ip_prefix_length. Note that the subnet cidr has to be within the cidr range of this Internal Range. "ipPrefixLength": 42, # Required. The prefix length of the subnet's IP address range. Use CIDR range notation, such as `29` to provision a subnet with an `x.x.x.x/29` CIDR range. The IP address range is drawn from a pool of available ranges in the service consumer's allocated range. GCE disallows subnets with prefix_length > 29 "outsideAllocationPublicIpRange": "A String", # Optional. Enable outside allocation using public IP addresses. Any public IP range may be specified. If this field is provided, we will not use customer reserved ranges for this primary IP range. "privateIpv6GoogleAccess": "A String", # Optional. The private IPv6 google access type for the VMs in this subnet. For information about the access types that can be set using this field, see [subnetwork](https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks) in the Compute API documentation. diff --git a/docs/dyn/servicenetworking_v1.services.projects.global_.networks.html b/docs/dyn/servicenetworking_v1.services.projects.global_.networks.html index d7bad5f040b..8f2c976178c 100644 --- a/docs/dyn/servicenetworking_v1.services.projects.global_.networks.html +++ b/docs/dyn/servicenetworking_v1.services.projects.global_.networks.html @@ -90,6 +90,9 @@

Instance Methods

get(name, includeUsedIpRanges=None, x__xgafv=None)

Service producers use this method to get the configuration of their connection including the import/export of custom routes and subnetwork routes with public IP.

+

+ getVpcServiceControls(name, x__xgafv=None)

+Consumers use this method to find out the state of VPC Service Controls. The controls could be enabled or disabled for a connection.

updateConsumerConfig(parent, body=None, x__xgafv=None)

Service producers use this method to update the configuration of their connection including the import/export of custom routes and subnetwork routes with public IP.

@@ -145,6 +148,25 @@

Method Details

}
+
+ getVpcServiceControls(name, x__xgafv=None)
+Consumers use this method to find out the state of VPC Service Controls. The controls could be enabled or disabled for a connection.
+
+Args:
+  name: string, Required. Name of the VPC Service Controls config to retrieve in the format: `services/{service}/projects/{project}/global/networks/{network}`. {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is a project number e.g. `12345` that contains the service consumer's VPC network. {network} is the name of the service consumer's VPC network. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for the get VPC Service Controls request.
+  "enabled": True or False, # Output only. Indicates whether the VPC Service Controls are enabled or disabled for the connection. If the consumer called the EnableVpcServiceControls method, then this is true. If the consumer called DisableVpcServiceControls, then this is false. The default is false.
+}
+
+
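A minimal sketch of calling the new `getVpcServiceControls` method from Python, using the resource-name format documented in the Args section above; the project number and network name are placeholders, and Application Default Credentials are assumed.

```python
from googleapiclient.discovery import build

sn = build("servicenetworking", "v1")

# services/{service}/projects/{project}/global/networks/{network}
name = "services/servicenetworking.googleapis.com/projects/123456/global/networks/default"

response = (
    sn.services()
    .projects()
    .global_()
    .networks()
    .getVpcServiceControls(name=name)
    .execute()
)
print(response.get("enabled", False))  # output-only flag; defaults to False
```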
updateConsumerConfig(parent, body=None, x__xgafv=None)
Service producers use this method to update the configuration of their connection including the import/export of custom routes and subnetwork routes with public IP.
diff --git a/docs/dyn/spanner_v1.projects.instances.html b/docs/dyn/spanner_v1.projects.instances.html
index 6d2287311a3..7e5025a7e7c 100644
--- a/docs/dyn/spanner_v1.projects.instances.html
+++ b/docs/dyn/spanner_v1.projects.instances.html
@@ -151,6 +151,18 @@ 

Method Details

{ # The request for CreateInstance. "instance": { # An isolated set of Cloud Spanner resources on which databases can be hosted. # Required. The instance to create. The name may be omitted, but if specified must be `/instances/`. + "autoscalingConfig": { # Autoscaling config for an instance. # Optional. The autoscaling configuration. Autoscaling is enabled if this field is set. When autoscaling is enabled, node_count and processing_units are treated as OUTPUT_ONLY fields and reflect the current compute capacity allocated to the instance. + "autoscalingLimits": { # The autoscaling limits for the instance. Users can define the minimum and maximum compute capacity allocated to the instance, and the autoscaler will only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit. # Required. Autoscaling limits for an instance. + "maxNodes": 42, # Maximum number of nodes allocated to the instance. If set, this number should be greater than or equal to min_nodes. + "maxProcessingUnits": 42, # Maximum number of processing units allocated to the instance. If set, this number should be multiples of 1000 and be greater than or equal to min_processing_units. + "minNodes": 42, # Minimum number of nodes allocated to the instance. If set, this number should be greater than or equal to 1. + "minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. + }, + "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. + "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 100] inclusive. + }, + }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. @@ -238,6 +250,18 @@

Method Details

An object of the form: { # An isolated set of Cloud Spanner resources on which databases can be hosted. + "autoscalingConfig": { # Autoscaling config for an instance. # Optional. The autoscaling configuration. Autoscaling is enabled if this field is set. When autoscaling is enabled, node_count and processing_units are treated as OUTPUT_ONLY fields and reflect the current compute capacity allocated to the instance. + "autoscalingLimits": { # The autoscaling limits for the instance. Users can define the minimum and maximum compute capacity allocated to the instance, and the autoscaler will only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit. # Required. Autoscaling limits for an instance. + "maxNodes": 42, # Maximum number of nodes allocated to the instance. If set, this number should be greater than or equal to min_nodes. + "maxProcessingUnits": 42, # Maximum number of processing units allocated to the instance. If set, this number should be multiples of 1000 and be greater than or equal to min_processing_units. + "minNodes": 42, # Minimum number of nodes allocated to the instance. If set, this number should be greater than or equal to 1. + "minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. + }, + "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. + "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 100] inclusive. + }, + }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. @@ -325,6 +349,18 @@

Method Details

{ # The response for ListInstances. "instances": [ # The list of requested instances. { # An isolated set of Cloud Spanner resources on which databases can be hosted. + "autoscalingConfig": { # Autoscaling config for an instance. # Optional. The autoscaling configuration. Autoscaling is enabled if this field is set. When autoscaling is enabled, node_count and processing_units are treated as OUTPUT_ONLY fields and reflect the current compute capacity allocated to the instance. + "autoscalingLimits": { # The autoscaling limits for the instance. Users can define the minimum and maximum compute capacity allocated to the instance, and the autoscaler will only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit. # Required. Autoscaling limits for an instance. + "maxNodes": 42, # Maximum number of nodes allocated to the instance. If set, this number should be greater than or equal to min_nodes. + "maxProcessingUnits": 42, # Maximum number of processing units allocated to the instance. If set, this number should be multiples of 1000 and be greater than or equal to min_processing_units. + "minNodes": 42, # Minimum number of nodes allocated to the instance. If set, this number should be greater than or equal to 1. + "minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. + }, + "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. + "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 100] inclusive. + }, + }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. @@ -380,6 +416,18 @@

Method Details

{ # The request for UpdateInstance. "fieldMask": "A String", # Required. A mask specifying which fields in Instance should be updated. The field mask must always be specified; this prevents any future fields in Instance from being erased accidentally by clients that do not know about them. "instance": { # An isolated set of Cloud Spanner resources on which databases can be hosted. # Required. The instance to update, which must always include the instance name. Otherwise, only fields mentioned in field_mask need be included. + "autoscalingConfig": { # Autoscaling config for an instance. # Optional. The autoscaling configuration. Autoscaling is enabled if this field is set. When autoscaling is enabled, node_count and processing_units are treated as OUTPUT_ONLY fields and reflect the current compute capacity allocated to the instance. + "autoscalingLimits": { # The autoscaling limits for the instance. Users can define the minimum and maximum compute capacity allocated to the instance, and the autoscaler will only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit. # Required. Autoscaling limits for an instance. + "maxNodes": 42, # Maximum number of nodes allocated to the instance. If set, this number should be greater than or equal to min_nodes. + "maxProcessingUnits": 42, # Maximum number of processing units allocated to the instance. If set, this number should be multiples of 1000 and be greater than or equal to min_processing_units. + "minNodes": 42, # Minimum number of nodes allocated to the instance. If set, this number should be greater than or equal to 1. + "minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. + }, + "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. + "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 100] inclusive. + }, + }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. "createTime": "A String", # Output only. The time at which the instance was created. "displayName": "A String", # Required. The descriptive name for this instance as it appears in UIs. Must be unique per project and between 4 and 30 characters in length. diff --git a/docs/dyn/sqladmin_v1.backupRuns.html b/docs/dyn/sqladmin_v1.backupRuns.html index 721bb331c80..625ec9dbc1c 100644 --- a/docs/dyn/sqladmin_v1.backupRuns.html +++ b/docs/dyn/sqladmin_v1.backupRuns.html @@ -172,8 +172,8 @@
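The Spanner hunks above add an optional `autoscalingConfig` to Instance; when it is set, `nodeCount` and `processingUnits` become output-only and report the current capacity. A minimal sketch of enabling it on an existing instance via `projects.instances.patch`; the project and instance names are placeholders, and the limits and targets are arbitrary values inside the documented ranges.

```python
from googleapiclient.discovery import build

spanner = build("spanner", "v1")

instance_name = "projects/my-project/instances/my-instance"  # placeholder
body = {
    "fieldMask": "autoscalingConfig",
    "instance": {
        "name": instance_name,
        "autoscalingConfig": {
            # Use either nodes or processing units for both limits, not a mix.
            "autoscalingLimits": {"minNodes": 1, "maxNodes": 5},
            "autoscalingTargets": {
                "highPriorityCpuUtilizationPercent": 65,  # valid range [10, 90]
                "storageUtilizationPercent": 90,          # valid range [10, 100]
            },
        },
    },
}
operation = spanner.projects().instances().patch(name=instance_name, body=body).execute()
print(operation["name"])
```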

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -359,8 +359,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1.databases.html b/docs/dyn/sqladmin_v1.databases.html index d48e7660a8b..86d4352a6c7 100644 --- a/docs/dyn/sqladmin_v1.databases.html +++ b/docs/dyn/sqladmin_v1.databases.html @@ -175,8 +175,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -332,8 +332,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -494,8 +494,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -620,8 +620,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1.instances.html b/docs/dyn/sqladmin_v1.instances.html index 8f0ae82b000..71bcc7c89e2 100644 --- a/docs/dyn/sqladmin_v1.instances.html +++ b/docs/dyn/sqladmin_v1.instances.html @@ -223,8 +223,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -352,8 +352,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -464,8 +464,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -594,8 +594,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -739,8 +739,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -856,8 +856,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1087,8 +1087,8 @@

], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1105,7 +1105,6 @@
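A hedged sketch of the corresponding settings patch for a PostgreSQL instance, using one of the valid `ssl_mode`/`require_ssl` pairs listed above; project and instance names are placeholders.

```python
from googleapiclient.discovery import build

sqladmin = build("sqladmin", "v1")

body = {
    "settings": {
        "ipConfiguration": {
            "sslMode": "ENCRYPTED_ONLY",  # accept only SSL/TLS connections, client certs not required
            "requireSsl": False,          # one of the valid pairs per the description above
        }
    }
}
op = sqladmin.instances().patch(
    project="my-project", instance="my-postgres-instance", body=body
).execute()
```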

}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -1158,8 +1157,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1246,8 +1245,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1471,8 +1470,8 @@

], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1489,7 +1488,6 @@

}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -1586,8 +1584,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1821,8 +1819,8 @@

], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1839,7 +1837,6 @@

}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -2122,8 +2119,8 @@

], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2140,7 +2137,6 @@

}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -2237,8 +2233,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2345,8 +2341,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2462,8 +2458,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2569,8 +2565,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2676,8 +2672,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2795,8 +2791,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2912,8 +2908,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3019,8 +3015,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3126,8 +3122,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3234,8 +3230,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3351,8 +3347,8 @@

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3577,8 +3573,8 @@

], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -3595,7 +3591,6 @@
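
As a rough illustration of the ssl_mode guidance above, a PostgreSQL instance could be switched to encrypted-only connections with a settings patch like the following sketch (project and instance names are placeholders; ipConfiguration is the parent object of these flags in the Settings schema):

from googleapiclient import discovery

sqladmin = discovery.build("sqladmin", "v1")

body = {
    "settings": {
        "ipConfiguration": {
            "sslMode": "ENCRYPTED_ONLY",  # takes priority over the legacy flag
            "requireSsl": False,          # a valid pairing per the description above
        }
    }
}

operation = sqladmin.instances().patch(
    project="example-project", instance="example-instance", body=body
).execute()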


}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -3692,8 +3687,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1.operations.html b/docs/dyn/sqladmin_v1.operations.html index 06df8445921..bedf5687f6a 100644 --- a/docs/dyn/sqladmin_v1.operations.html +++ b/docs/dyn/sqladmin_v1.operations.html @@ -187,8 +187,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -298,8 +298,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1.projects.instances.html b/docs/dyn/sqladmin_v1.projects.instances.html index fd1782b4888..f63c8e317a5 100644 --- a/docs/dyn/sqladmin_v1.projects.instances.html +++ b/docs/dyn/sqladmin_v1.projects.instances.html @@ -227,8 +227,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -344,8 +344,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -457,8 +457,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -581,8 +581,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1.sslCerts.html b/docs/dyn/sqladmin_v1.sslCerts.html index 1653a7aba5d..b654d7520b9 100644 --- a/docs/dyn/sqladmin_v1.sslCerts.html +++ b/docs/dyn/sqladmin_v1.sslCerts.html @@ -208,8 +208,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -367,8 +367,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1.users.html b/docs/dyn/sqladmin_v1.users.html index 0909fa91a2b..df69c0f5d27 100644 --- a/docs/dyn/sqladmin_v1.users.html +++ b/docs/dyn/sqladmin_v1.users.html @@ -173,8 +173,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -357,8 +357,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -547,8 +547,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.backupRuns.html b/docs/dyn/sqladmin_v1beta4.backupRuns.html index ab48042f54c..ed2d93cbe93 100644 --- a/docs/dyn/sqladmin_v1beta4.backupRuns.html +++ b/docs/dyn/sqladmin_v1beta4.backupRuns.html @@ -172,8 +172,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -359,8 +359,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.databases.html b/docs/dyn/sqladmin_v1beta4.databases.html index 0294cf88e89..4077aebd20a 100644 --- a/docs/dyn/sqladmin_v1beta4.databases.html +++ b/docs/dyn/sqladmin_v1beta4.databases.html @@ -175,8 +175,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -332,8 +332,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -494,8 +494,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -620,8 +620,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.instances.html b/docs/dyn/sqladmin_v1beta4.instances.html index dae383f8838..416ffa395ec 100644 --- a/docs/dyn/sqladmin_v1beta4.instances.html +++ b/docs/dyn/sqladmin_v1beta4.instances.html @@ -223,8 +223,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -352,8 +352,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -464,8 +464,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -594,8 +594,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -739,8 +739,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -856,8 +856,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1087,8 +1087,8 @@


], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1105,7 +1105,6 @@


}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -1158,8 +1157,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1246,8 +1245,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1471,8 +1470,8 @@


], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1489,7 +1488,6 @@


}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -1586,8 +1584,8 @@


}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -1821,8 +1819,8 @@


], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -1839,7 +1837,6 @@


}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -2122,8 +2119,8 @@


], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -2140,7 +2137,6 @@

Method Details

}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -2237,8 +2233,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2345,8 +2341,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2462,8 +2458,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2569,8 +2565,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2676,8 +2672,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2795,8 +2791,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -2912,8 +2908,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3019,8 +3015,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3126,8 +3122,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3234,8 +3230,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3351,8 +3347,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -3577,8 +3573,8 @@

Method Details

], "pscEnabled": True or False, # Whether PSC connectivity is enabled for this instance. }, - "requireSsl": True or False, # LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix) - "sslMode": "A String", # Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database will respect `ssl_mode` in this case and only accept SSL connections. + "requireSsl": True or False, # Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag. + "sslMode": "A String", # Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means "only accepts SSL connection", while the `require_ssl=false` means "both non-SSL and SSL connections are allowed". The database respects `ssl_mode` in this case and only accepts SSL connections. }, "kind": "A String", # This is always `sql#settings`. "locationPreference": { # Preferred location. This specifies where a Cloud SQL instance is located. Note that if the preferred location is not available, the instance will be located as close as possible within the region. Only one location may be specified. 
# The location preference settings. This allows the instance to be located as near as possible to either an App Engine app or Compute Engine zone for better performance. App Engine co-location was only applicable to First Generation instances. @@ -3595,7 +3591,6 @@

Method Details

}, "passwordValidationPolicy": { # Database instance local user password validation policy # The local user password validation policy of the instance. "complexity": "A String", # The complexity of the password. - "disallowCompromisedCredentials": True or False, # Disallow credentials that have been previously compromised by a public data breach. "disallowUsernameSubstring": True or False, # Disallow username as a part of the password. "enablePasswordPolicy": True or False, # Whether the password policy is enabled or not. "minLength": 42, # Minimum number of characters allowed. @@ -3692,8 +3687,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.operations.html b/docs/dyn/sqladmin_v1beta4.operations.html index 45302bbfd32..2d02d0d855d 100644 --- a/docs/dyn/sqladmin_v1beta4.operations.html +++ b/docs/dyn/sqladmin_v1beta4.operations.html @@ -187,8 +187,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -298,8 +298,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.projects.instances.html b/docs/dyn/sqladmin_v1beta4.projects.instances.html index 9e781091ab9..7a04770ea96 100644 --- a/docs/dyn/sqladmin_v1beta4.projects.instances.html +++ b/docs/dyn/sqladmin_v1beta4.projects.instances.html @@ -227,8 +227,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -344,8 +344,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -457,8 +457,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -581,8 +581,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.sslCerts.html b/docs/dyn/sqladmin_v1beta4.sslCerts.html index 179b3cb94cb..5cef57198d4 100644 --- a/docs/dyn/sqladmin_v1beta4.sslCerts.html +++ b/docs/dyn/sqladmin_v1beta4.sslCerts.html @@ -208,8 +208,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -367,8 +367,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/sqladmin_v1beta4.users.html b/docs/dyn/sqladmin_v1beta4.users.html index a8ad974cb2d..fb33fa79690 100644 --- a/docs/dyn/sqladmin_v1beta4.users.html +++ b/docs/dyn/sqladmin_v1beta4.users.html @@ -173,8 +173,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -357,8 +357,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. @@ -547,8 +547,8 @@

Method Details

}, "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option Applies only to Cloud SQL for SQL Server. "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server. - "stopAt": "A String", # Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only - "stopAtMark": "A String", # Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only + "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only. + "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only. "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server. }, "csvImportOptions": { # Options for importing data as CSV. diff --git a/docs/dyn/testing_v1.projects.deviceSessions.html b/docs/dyn/testing_v1.projects.deviceSessions.html index fadc083b066..56cb4155d42 100644 --- a/docs/dyn/testing_v1.projects.deviceSessions.html +++ b/docs/dyn/testing_v1.projects.deviceSessions.html @@ -142,16 +142,6 @@

Method Details

"locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. }, - "androidDeviceList": { # A list of Android device configurations in which the test is to be executed. # Optional. The list of requested devices. At most two devices may be simultaneously requested. - "androidDevices": [ # Required. A list of Android devices. - { # A single Android device. - "androidModelId": "A String", # Required. The id of the Android device to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "androidVersionId": "A String", # Required. The id of the Android OS version to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. - "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. - }, - ], - }, "createTime": "A String", # Output only. The time that the Session was created. "displayName": "A String", # Output only. The title of the DeviceSession to be presented in the UI. "expireTime": "A String", # Optional. If the device is still in use at this time, any connections will be ended and the SessionState will transition from ACTIVE to FINISHED. @@ -165,7 +155,7 @@

Method Details

"stateMessage": "A String", # Output only. A human-readable message to explain the state. }, ], - "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes. + "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes. } x__xgafv: string, V1 error format. @@ -184,16 +174,6 @@

Method Details

"locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. }, - "androidDeviceList": { # A list of Android device configurations in which the test is to be executed. # Optional. The list of requested devices. At most two devices may be simultaneously requested. - "androidDevices": [ # Required. A list of Android devices. - { # A single Android device. - "androidModelId": "A String", # Required. The id of the Android device to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "androidVersionId": "A String", # Required. The id of the Android OS version to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. - "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. - }, - ], - }, "createTime": "A String", # Output only. The time that the Session was created. "displayName": "A String", # Output only. The title of the DeviceSession to be presented in the UI. "expireTime": "A String", # Optional. If the device is still in use at this time, any connections will be ended and the SessionState will transition from ACTIVE to FINISHED. @@ -207,7 +187,7 @@

Method Details

"stateMessage": "A String", # Output only. A human-readable message to explain the state. }, ], - "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes. + "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes. }
@@ -233,16 +213,6 @@

Method Details

"locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. }, - "androidDeviceList": { # A list of Android device configurations in which the test is to be executed. # Optional. The list of requested devices. At most two devices may be simultaneously requested. - "androidDevices": [ # Required. A list of Android devices. - { # A single Android device. - "androidModelId": "A String", # Required. The id of the Android device to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "androidVersionId": "A String", # Required. The id of the Android OS version to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. - "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. - }, - ], - }, "createTime": "A String", # Output only. The time that the Session was created. "displayName": "A String", # Output only. The title of the DeviceSession to be presented in the UI. "expireTime": "A String", # Optional. If the device is still in use at this time, any connections will be ended and the SessionState will transition from ACTIVE to FINISHED. @@ -256,7 +226,7 @@

Method Details

"stateMessage": "A String", # Output only. A human-readable message to explain the state. }, ], - "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes. + "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes. }
@@ -287,16 +257,6 @@

Method Details

"locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. }, - "androidDeviceList": { # A list of Android device configurations in which the test is to be executed. # Optional. The list of requested devices. At most two devices may be simultaneously requested. - "androidDevices": [ # Required. A list of Android devices. - { # A single Android device. - "androidModelId": "A String", # Required. The id of the Android device to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "androidVersionId": "A String", # Required. The id of the Android OS version to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. - "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. - }, - ], - }, "createTime": "A String", # Output only. The time that the Session was created. "displayName": "A String", # Output only. The title of the DeviceSession to be presented in the UI. "expireTime": "A String", # Optional. If the device is still in use at this time, any connections will be ended and the SessionState will transition from ACTIVE to FINISHED. @@ -310,7 +270,7 @@

Method Details

"stateMessage": "A String", # Output only. A human-readable message to explain the state. }, ], - "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes. + "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes. }, ], "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. @@ -348,16 +308,6 @@

Method Details

"locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. }, - "androidDeviceList": { # A list of Android device configurations in which the test is to be executed. # Optional. The list of requested devices. At most two devices may be simultaneously requested. - "androidDevices": [ # Required. A list of Android devices. - { # A single Android device. - "androidModelId": "A String", # Required. The id of the Android device to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "androidVersionId": "A String", # Required. The id of the Android OS version to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. - "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. - }, - ], - }, "createTime": "A String", # Output only. The time that the Session was created. "displayName": "A String", # Output only. The title of the DeviceSession to be presented in the UI. "expireTime": "A String", # Optional. If the device is still in use at this time, any connections will be ended and the SessionState will transition from ACTIVE to FINISHED. @@ -371,7 +321,7 @@

Method Details

"stateMessage": "A String", # Output only. A human-readable message to explain the state. }, ], - "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes. + "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes. } updateMask: string, Required. The list of fields to update. @@ -391,16 +341,6 @@

Method Details

"locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. }, - "androidDeviceList": { # A list of Android device configurations in which the test is to be executed. # Optional. The list of requested devices. At most two devices may be simultaneously requested. - "androidDevices": [ # Required. A list of Android devices. - { # A single Android device. - "androidModelId": "A String", # Required. The id of the Android device to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "androidVersionId": "A String", # Required. The id of the Android OS version to be used. Use the TestEnvironmentDiscoveryService to get supported options. - "locale": "A String", # Required. The locale the test device used for testing. Use the TestEnvironmentDiscoveryService to get supported options. - "orientation": "A String", # Required. How the device is oriented during the test. Use the TestEnvironmentDiscoveryService to get supported options. - }, - ], - }, "createTime": "A String", # Output only. The time that the Session was created. "displayName": "A String", # Output only. The title of the DeviceSession to be presented in the UI. "expireTime": "A String", # Optional. If the device is still in use at this time, any connections will be ended and the SessionState will transition from ACTIVE to FINISHED. @@ -414,7 +354,7 @@

Method Details

"stateMessage": "A String", # Output only. A human-readable message to explain the state. }, ], - "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes. + "ttl": "A String", # Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes. }
diff --git a/docs/dyn/texttospeech_v1.projects.locations.html b/docs/dyn/texttospeech_v1.projects.locations.html index bec640a3df4..b01042cb53b 100644 --- a/docs/dyn/texttospeech_v1.projects.locations.html +++ b/docs/dyn/texttospeech_v1.projects.locations.html @@ -119,7 +119,7 @@

Method Details

"voice": { # Description of which voice to use for a synthesis request. # Required. The desired voice of the synthesized audio. "customVoice": { # Description of the custom voice to be synthesized. # The configuration for a custom voice. If [CustomVoiceParams.model] is set, the service will choose the custom voice matching the specified configuration. "model": "A String", # Required. The name of the AutoML model that synthesizes the custom voice. - "reportedUsage": "A String", # Optional. The usage of the synthesized audio to be reported. + "reportedUsage": "A String", # Optional. Deprecated. The usage of the synthesized audio to be reported. }, "languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and gender. diff --git a/docs/dyn/texttospeech_v1.text.html b/docs/dyn/texttospeech_v1.text.html index 584da3a315a..ce62dec5d39 100644 --- a/docs/dyn/texttospeech_v1.text.html +++ b/docs/dyn/texttospeech_v1.text.html @@ -112,7 +112,7 @@

Method Details

"voice": { # Description of which voice to use for a synthesis request. # Required. The desired voice of the synthesized audio. "customVoice": { # Description of the custom voice to be synthesized. # The configuration for a custom voice. If [CustomVoiceParams.model] is set, the service will choose the custom voice matching the specified configuration. "model": "A String", # Required. The name of the AutoML model that synthesizes the custom voice. - "reportedUsage": "A String", # Optional. The usage of the synthesized audio to be reported. + "reportedUsage": "A String", # Optional. Deprecated. The usage of the synthesized audio to be reported. }, "languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and gender. diff --git a/docs/dyn/texttospeech_v1beta1.projects.locations.html b/docs/dyn/texttospeech_v1beta1.projects.locations.html index 91a487cd0ac..67ee0ceef5c 100644 --- a/docs/dyn/texttospeech_v1beta1.projects.locations.html +++ b/docs/dyn/texttospeech_v1beta1.projects.locations.html @@ -119,7 +119,7 @@

Method Details

"voice": { # Description of which voice to use for a synthesis request. # Required. The desired voice of the synthesized audio. "customVoice": { # Description of the custom voice to be synthesized. # The configuration for a custom voice. If [CustomVoiceParams.model] is set, the service will choose the custom voice matching the specified configuration. "model": "A String", # Required. The name of the AutoML model that synthesizes the custom voice. - "reportedUsage": "A String", # Optional. The usage of the synthesized audio to be reported. + "reportedUsage": "A String", # Optional. Deprecated. The usage of the synthesized audio to be reported. }, "languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and gender. diff --git a/docs/dyn/texttospeech_v1beta1.text.html b/docs/dyn/texttospeech_v1beta1.text.html index ddb364a4511..fd490db504d 100644 --- a/docs/dyn/texttospeech_v1beta1.text.html +++ b/docs/dyn/texttospeech_v1beta1.text.html @@ -115,7 +115,7 @@

Method Details

"voice": { # Description of which voice to use for a synthesis request. # Required. The desired voice of the synthesized audio. "customVoice": { # Description of the custom voice to be synthesized. # The configuration for a custom voice. If [CustomVoiceParams.model] is set, the service will choose the custom voice matching the specified configuration. "model": "A String", # Required. The name of the AutoML model that synthesizes the custom voice. - "reportedUsage": "A String", # Optional. The usage of the synthesized audio to be reported. + "reportedUsage": "A String", # Optional. Deprecated. The usage of the synthesized audio to be reported. }, "languageCode": "A String", # Required. The language (and potentially also the region) of the voice expressed as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag, e.g. "en-US". This should not include a script tag (e.g. use "cmn-cn" rather than "cmn-Hant-cn"), because the script will be inferred from the input provided in the SynthesisInput. The TTS service will use this parameter to help choose an appropriate voice. Note that the TTS service may choose a voice with a slightly different language code than the one selected; it may substitute a different region (e.g. using en-US rather than en-CA if there isn't a Canadian voice available), or even a different language, e.g. using "nb" (Norwegian Bokmal) instead of "no" (Norwegian)". "name": "A String", # The name of the voice. If not set, the service will choose a voice based on the other parameters such as language_code and gender. diff --git a/docs/dyn/vmmigration_v1.projects.locations.sources.html b/docs/dyn/vmmigration_v1.projects.locations.sources.html index baaedbe902b..2b1cbdcbbe3 100644 --- a/docs/dyn/vmmigration_v1.projects.locations.sources.html +++ b/docs/dyn/vmmigration_v1.projects.locations.sources.html @@ -188,6 +188,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "labels": { # The labels of the source. "a_key": "A String", }, @@ -462,6 +465,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "labels": { # The labels of the source. "a_key": "A String", }, @@ -555,6 +561,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "labels": { # The labels of the source. "a_key": "A String", }, @@ -655,6 +664,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "labels": { # The labels of the source. "a_key": "A String", }, diff --git a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html index 2f1eacc9ccd..1646bc9b3f3 100644 --- a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html +++ b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cloneJobs.html @@ -186,6 +186,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -324,6 +327,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -438,6 +444,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", diff --git a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html index 3339d965f6d..e0b76e1843e 100644 --- a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html +++ b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.cutoverJobs.html @@ -186,6 +186,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -434,6 +437,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -658,6 +664,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", diff --git a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.html b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.html index abd0c0135cd..055488099ae 100644 --- a/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.html +++ b/docs/dyn/vmmigration_v1.projects.locations.sources.migratingVms.html @@ -180,6 +180,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -197,6 +200,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -215,6 +221,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -265,6 +274,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -466,6 +478,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -560,6 +575,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -917,6 +935,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -934,6 +955,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -952,6 +976,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -1002,6 +1029,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1203,6 +1233,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1297,6 +1330,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1554,6 +1590,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -1571,6 +1610,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -1589,6 +1631,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -1639,6 +1684,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1840,6 +1888,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1934,6 +1985,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -2195,6 +2249,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -2212,6 +2269,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -2230,6 +2290,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -2280,6 +2343,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -2481,6 +2547,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -2575,6 +2644,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", diff --git a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.html b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.html index 4dd9017e0ca..6fa501d53c6 100644 --- a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.html +++ b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.html @@ -188,6 +188,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Provides details on the state of the Source in case of an error. "code": 42, # The status code, which should be an enum value of google.rpc.Code. "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. @@ -472,6 +475,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Provides details on the state of the Source in case of an error. "code": 42, # The status code, which should be an enum value of google.rpc.Code. "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. @@ -574,6 +580,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Provides details on the state of the Source in case of an error. "code": 42, # The status code, which should be an enum value of google.rpc.Code. "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. @@ -683,6 +692,9 @@

}, "createTime": "A String", # Output only. The create time timestamp. "description": "A String", # User-provided description of the source. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption details of the source data stored by the service. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # Output only. Provides details on the state of the Source in case of an error. "code": 42, # The status code, which should be an enum value of google.rpc.Code. "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use. diff --git a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cloneJobs.html b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cloneJobs.html index 0539d90b595..f37400aa954 100644 --- a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cloneJobs.html +++ b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cloneJobs.html @@ -187,6 +187,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -432,6 +435,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -653,6 +659,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", diff --git a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cutoverJobs.html b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cutoverJobs.html index c7445226cd2..c881b812ee1 100644 --- a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cutoverJobs.html +++ b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.cutoverJobs.html @@ -187,6 +187,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -545,6 +548,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -879,6 +885,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", diff --git a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.html b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.html index 5dbd77a7844..af7ef2adea2 100644 --- a/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.html +++ b/docs/dyn/vmmigration_v1alpha1.projects.locations.sources.migratingVms.html @@ -180,6 +180,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -197,6 +200,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -216,6 +222,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -267,6 +276,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -524,6 +536,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -725,6 +740,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1244,6 +1262,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -1261,6 +1282,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -1280,6 +1304,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -1331,6 +1358,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1588,6 +1618,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -1789,6 +1822,9 @@

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -2208,6 +2244,9 @@

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -2225,6 +2264,9 @@

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -2244,6 +2286,9 @@

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -2295,6 +2340,9 @@

Method Details

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -2552,6 +2600,9 @@

Method Details

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -2753,6 +2804,9 @@

Method Details

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -3176,6 +3230,9 @@

Method Details

}, "diskName": "A String", # Optional. The name of the Persistent Disk to create. "diskType": "A String", # The disk type to use. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "sourceDiskNumber": 42, # Required. The ordinal number of the source VM disk. "vmAttachmentDetails": { # Details for attachment of the disk to a VM. # Optional. Details for attachment of the disk to a VM. Used when the disk is set to be attacked to a target VM. "deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. @@ -3193,6 +3250,9 @@

Method Details

"deviceName": "A String", # Optional. Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disk-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks. "diskName": "A String", # Optional. The name of the disk. "diskType": "A String", # Optional. The type of disk provisioning to use for the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the boot disk. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "image": { # Contains details about the image source used to create the disk. # The image to use when creating the disk. "sourceImage": "A String", # Required. The Image resource used when creating the disk. }, @@ -3212,6 +3272,9 @@

Method Details

"onHostMaintenance": "A String", # How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance. "restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # Optional. The hostname to assign to the VM. "labels": { # Optional. A map of labels to associate with the VM. "a_key": "A String", @@ -3263,6 +3326,9 @@

Method Details

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. Immutable. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -3520,6 +3586,9 @@

Method Details

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", @@ -3721,6 +3790,9 @@

Method Details

"restartType": "A String", # Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user). This configuration is identical to `automaticRestart` field in Compute Engine create instance under scheduling. It was changed to an enum (instead of a boolean) to match the default value in Compute Engine which is automatic restart. }, "diskType": "A String", # The disk type to use in the VM. + "encryption": { # Encryption message describes the details of the applied encryption. # Optional. The encryption to apply to the VM disks. + "kmsKey": "A String", # Required. The name of the encryption key that is stored in Google Cloud KMS. + }, "hostname": "A String", # The hostname to assign to the VM. "labels": { # A map of labels to associate with the VM. "a_key": "A String", diff --git a/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html b/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html index 361a55cdf66..180f0e101ea 100644 --- a/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html +++ b/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.html @@ -79,6 +79,11 @@

Instance Methods

Returns the callbacks Resource.

+

+ stepEntries() +

+

Returns the stepEntries Resource.

+

cancel(name, body=None, x__xgafv=None)

Cancels an execution of the given name.

diff --git a/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.stepEntries.html b/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.stepEntries.html
new file mode 100644
index 00000000000..2205251e72c
--- /dev/null
+++ b/docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.stepEntries.html
@@ -0,0 +1,204 @@
+
+
+
+

Workflow Executions API . projects . locations . workflows . executions . stepEntries

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets a step entry.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, skip=None, x__xgafv=None)

+

Lists step entries for the corresponding workflow execution. Returned entries are ordered by their create_time.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets a step entry.
+
+Args:
+  name: string, Required. The name of the step entry to retrieve. Format: projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/{step_entry} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A StepEntry contains debugging information for a step transition in a workflow execution.
+  "createTime": "A String", # Output only. The creation time of the step entry.
+  "entryId": "A String", # Output only. The numeric ID of this step entry, used for navigation.
+  "exception": { # Exception describes why the step entry failed. # Output only. The exception thrown by the step entry.
+    "payload": "A String", # Error message represented as a JSON string.
+  },
+  "name": "A String", # Output only. The full resource name of the step entry. Each step entry has a unique entry ID, which is a monotonically increasing counter. Step entry names have the format: `projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/{step_entry}`.
+  "navigationInfo": { # NavigationInfo describes what steps if any come before or after this step, or what steps are parents or children of this step. # Output only. The NavigationInfo associated to this step.
+    "children": [ # Step entries that can be reached by "stepping into" e.g. a subworkflow call.
+      "A String",
+    ],
+    "next": "A String", # The index of the next step in the current workflow, if any.
+    "parent": "A String", # The step entry, if any, that can be reached by "stepping out" of the current workflow being executed.
+    "previous": "A String", # The index of the previous step in the current workflow, if any.
+  },
+  "routine": "A String", # Output only. The name of the routine this step entry belongs to. A routine name is the subworkflow name defined in the YAML source code. The top level routine name is `main`.
+  "state": "A String", # Output only. The state of the step entry.
+  "step": "A String", # Output only. The name of the step this step entry belongs to.
+  "stepEntryMetadata": { # StepEntryMetadata contains metadata information about this step. # Output only. The StepEntryMetadata associated to this step.
+    "progressNumber": "A String", # Progress number represents the current state of the current progress. eg: A step entry represents the 4th iteration in a progress of PROGRESS_TYPE_FOR.
+    "progressType": "A String", # Progress type of this step entry.
+    "threadId": "A String", # Child thread id that this step entry belongs to.
+  },
+  "stepType": "A String", # Output only. The type of the step this step entry belongs to.
+  "updateTime": "A String", # Output only. The most recently updated time of the step entry.
+}
+
+ +
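A minimal sketch of calling the new `get` method from the generated Python client follows; the project, location, workflow, execution, and entry IDs are placeholders.

```python
from googleapiclient.discovery import build

# Build the Workflow Executions client; Application Default Credentials are assumed.
executions_api = build("workflowexecutions", "v1")

# Placeholder resource name in the documented format.
step_entry_name = (
    "projects/my-project/locations/us-central1/workflows/my-workflow"
    "/executions/my-execution/stepEntries/1"
)

step_entry = (
    executions_api.projects()
    .locations()
    .workflows()
    .executions()
    .stepEntries()
    .get(name=step_entry_name)
    .execute()
)

# Output-only fields populated by the service.
print(step_entry["state"], step_entry["routine"], step_entry["step"])
```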
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, skip=None, x__xgafv=None) +
Lists step entries for the corresponding workflow execution. Returned entries are ordered by their create_time.
+
+Args:
+  parent: string, Required. Name of the workflow execution to list entries for. Format: projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/ (required)
+  filter: string, Optional. Filters applied to the `[StepEntries.ListStepEntries]` results. The following fields are supported for filtering: `entryId`, `createTime`, `updateTime`, `routine`, `step`, `stepType`, `state`. For details, see AIP-160. For example, if you are using the Google APIs Explorer: `state="SUCCEEDED"` or `createTime>"2023-08-01" AND state="FAILED"`
+  orderBy: string, Optional. Comma-separated list of fields that specify the ordering applied to the `[StepEntries.ListStepEntries]` results. By default the ordering is based on ascending `entryId`. The following fields are supported for ordering: `entryId`, `createTime`, `updateTime`, `routine`, `step`, `stepType`, `state`. For details, see AIP-132.
+  pageSize: integer, Optional. Number of step entries to return per call. The default max is 1000.
+  pageToken: string, Optional. A page token, received from a previous `ListStepEntries` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListStepEntries` must match the call that provided the page token.
+  skip: integer, Optional. The number of step entries to skip. It can be used with or without a pageToken. If used with a pageToken, then it indicates the number of step entries to skip starting from the requested page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ExecutionHistory.ListStepEntries.
+  "nextPageToken": "A String", # A token to retrieve next page of results. Pass this value in the ListStepEntriesRequest.page_token field in the subsequent call to `ListStepEntries` method to retrieve the next page of results.
+  "stepEntries": [ # The list of entries.
+    { # A StepEntry contains debugging information for a step transition in a workflow execution.
+      "createTime": "A String", # Output only. The creation time of the step entry.
+      "entryId": "A String", # Output only. The numeric ID of this step entry, used for navigation.
+      "exception": { # Exception describes why the step entry failed. # Output only. The exception thrown by the step entry.
+        "payload": "A String", # Error message represented as a JSON string.
+      },
+      "name": "A String", # Output only. The full resource name of the step entry. Each step entry has a unique entry ID, which is a monotonically increasing counter. Step entry names have the format: `projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/{step_entry}`.
+      "navigationInfo": { # NavigationInfo describes what steps if any come before or after this step, or what steps are parents or children of this step. # Output only. The NavigationInfo associated to this step.
+        "children": [ # Step entries that can be reached by "stepping into" e.g. a subworkflow call.
+          "A String",
+        ],
+        "next": "A String", # The index of the next step in the current workflow, if any.
+        "parent": "A String", # The step entry, if any, that can be reached by "stepping out" of the current workflow being executed.
+        "previous": "A String", # The index of the previous step in the current workflow, if any.
+      },
+      "routine": "A String", # Output only. The name of the routine this step entry belongs to. A routine name is the subworkflow name defined in the YAML source code. The top level routine name is `main`.
+      "state": "A String", # Output only. The state of the step entry.
+      "step": "A String", # Output only. The name of the step this step entry belongs to.
+      "stepEntryMetadata": { # StepEntryMetadata contains metadata information about this step. # Output only. The StepEntryMetadata associated to this step.
+        "progressNumber": "A String", # Progress number represents the current state of the current progress. eg: A step entry represents the 4th iteration in a progress of PROGRESS_TYPE_FOR.
+        "progressType": "A String", # Progress type of this step entry.
+        "threadId": "A String", # Child thread id that this step entry belongs to.
+      },
+      "stepType": "A String", # Output only. The type of the step this step entry belongs to.
+      "updateTime": "A String", # Output only. The most recently updated time of the step entry.
+    },
+  ],
+  "totalSize": 42, # Indicates the total number of StepEntries that matched the request filter. For running executions, this number shows the number of StepEntries that are executed thus far.
+}
+
+ +
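A minimal sketch of `list` using the documented `filter` and `orderBy` parameters follows; the parent execution name and filter value are placeholders, and the `desc` suffix assumes the AIP-132 ordering syntax referenced above.

```python
from googleapiclient.discovery import build

executions_api = build("workflowexecutions", "v1")
step_entries = (
    executions_api.projects().locations().workflows().executions().stepEntries()
)

# Placeholder parent execution.
parent = (
    "projects/my-project/locations/us-central1/workflows/my-workflow"
    "/executions/my-execution"
)

response = step_entries.list(
    parent=parent,
    filter='state="FAILED"',    # only failed step entries, per the filter example above
    orderBy="createTime desc",  # newest first (AIP-132 syntax)
    pageSize=100,
).execute()

for entry in response.get("stepEntries", []):
    print(entry["entryId"], entry["step"], entry["state"])
```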
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
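A minimal sketch of the client library's standard pagination pattern with `list_next()`, which returns `None` once the last page has been consumed; all names are placeholders.

```python
from googleapiclient.discovery import build

executions_api = build("workflowexecutions", "v1")
step_entries = (
    executions_api.projects().locations().workflows().executions().stepEntries()
)

parent = (
    "projects/my-project/locations/us-central1/workflows/my-workflow"
    "/executions/my-execution"
)

request = step_entries.list(parent=parent, pageSize=500)
while request is not None:
    response = request.execute()
    for entry in response.get("stepEntries", []):
        print(entry["name"], entry["state"])
    # list_next() returns None when there are no more pages.
    request = step_entries.list_next(request, response)
```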
+
+
\ No newline at end of file
diff --git a/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html b/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html
index 5361fe0501f..80f441f3bfe 100644
--- a/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html
+++ b/docs/dyn/workstations_v1.projects.locations.workstationClusters.workstationConfigs.html
@@ -155,7 +155,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -179,7 +179,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -333,7 +333,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -357,7 +357,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -499,7 +499,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -523,7 +523,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -619,7 +619,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -643,7 +643,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -754,7 +754,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -778,7 +778,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], diff --git a/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html b/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html index 491e08b5c22..55d05efe7d8 100644 --- a/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html +++ b/docs/dyn/workstations_v1beta.projects.locations.workstationClusters.workstationConfigs.html @@ -155,7 +155,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -197,7 +197,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -351,7 +351,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -393,7 +393,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -535,7 +535,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -577,7 +577,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -673,7 +673,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -715,7 +715,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. "A String", ], @@ -826,7 +826,7 @@

Method Details

"env": { # Optional. Environment variables passed to the container's entrypoint. "a_key": "A String", }, - "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible. + "image": "A String", # Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible. "runAsUser": 42, # Optional. If set, overrides the USER specified in the image with the given uid. "workingDir": "A String", # Optional. If set, overrides the default DIR specified by the image. }, @@ -868,7 +868,7 @@

Method Details

"machineType": "A String", # Optional. The type of machine to use for VM instances—for example, `"e2-standard-4"`. For more information about machine types that Cloud Workstations supports, see the list of [available machine types](https://cloud.google.com/workstations/docs/available-machine-types). "poolSize": 42, # Optional. The number of VMs that the system should keep idle so that new workstations can be started quickly for new users. Defaults to `0` in the API. "pooledInstances": 42, # Output only. Number of instances currently available in the pool for faster workstation startup. - "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. + "serviceAccount": "A String", # Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible. "serviceAccountScopes": [ # Optional. Scopes to grant to the service_account. Various scopes are automatically added based on feature usage. When specified, users of workstations under this configuration must have `iam.serviceAccounts.actAs` on the service account. 
"A String", ], diff --git a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json index 31329cf6d03..5d81036b18f 100644 --- a/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json +++ b/googleapiclient/discovery_cache/documents/acceleratedmobilepageurl.v1.json @@ -115,7 +115,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://acceleratedmobilepageurl.googleapis.com/", "schemas": { "AmpUrl": { diff --git a/googleapiclient/discovery_cache/documents/accesscontextmanager.v1beta.json b/googleapiclient/discovery_cache/documents/accesscontextmanager.v1beta.json index f3ff7685ca4..a9a0e7b2f0c 100644 --- a/googleapiclient/discovery_cache/documents/accesscontextmanager.v1beta.json +++ b/googleapiclient/discovery_cache/documents/accesscontextmanager.v1beta.json @@ -609,7 +609,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://accesscontextmanager.googleapis.com/", "schemas": { "AccessContextManagerOperationMetadata": { diff --git a/googleapiclient/discovery_cache/documents/acmedns.v1.json b/googleapiclient/discovery_cache/documents/acmedns.v1.json index cb8f7d5f0d3..b90d73344e4 100644 --- a/googleapiclient/discovery_cache/documents/acmedns.v1.json +++ b/googleapiclient/discovery_cache/documents/acmedns.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://acmedns.googleapis.com/", "schemas": { "AcmeChallengeSet": { diff --git a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json index 9592c7482b4..cd550501b89 100644 --- a/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/adexchangebuyer2.v2beta1.json @@ -2568,7 +2568,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://adexchangebuyer.googleapis.com/", "schemas": { "AbsoluteDateRange": { diff --git a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json index f7f57a089f8..ddf249b1544 100644 --- a/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.datatransfer_v1.json @@ -272,7 +272,7 @@ } } }, - "revision": "20231017", + "revision": "20231023", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Application": { diff --git a/googleapiclient/discovery_cache/documents/admin.directory_v1.json b/googleapiclient/discovery_cache/documents/admin.directory_v1.json index b654e506370..828a5650f0d 100644 --- a/googleapiclient/discovery_cache/documents/admin.directory_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.directory_v1.json @@ -4643,7 +4643,7 @@ } } }, - "revision": "20231017", + "revision": "20231023", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Alias": { diff --git a/googleapiclient/discovery_cache/documents/admin.reports_v1.json b/googleapiclient/discovery_cache/documents/admin.reports_v1.json index 0052c5f1319..7e5db6b2d37 100644 --- a/googleapiclient/discovery_cache/documents/admin.reports_v1.json +++ b/googleapiclient/discovery_cache/documents/admin.reports_v1.json @@ -623,7 +623,7 @@ } } }, - "revision": "20231017", + "revision": "20231023", "rootUrl": "https://admin.googleapis.com/", "schemas": { "Activities": { diff --git 
a/googleapiclient/discovery_cache/documents/admob.v1.json b/googleapiclient/discovery_cache/documents/admob.v1.json index 1048db51768..8e78ed4349b 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1.json +++ b/googleapiclient/discovery_cache/documents/admob.v1.json @@ -321,7 +321,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdUnit": { diff --git a/googleapiclient/discovery_cache/documents/admob.v1beta.json b/googleapiclient/discovery_cache/documents/admob.v1beta.json index e14a175d434..fb8f62fb781 100644 --- a/googleapiclient/discovery_cache/documents/admob.v1beta.json +++ b/googleapiclient/discovery_cache/documents/admob.v1beta.json @@ -758,7 +758,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://admob.googleapis.com/", "schemas": { "AdSource": { diff --git a/googleapiclient/discovery_cache/documents/adsense.v2.json b/googleapiclient/discovery_cache/documents/adsense.v2.json index 4231933dd11..766f6194d67 100644 --- a/googleapiclient/discovery_cache/documents/adsense.v2.json +++ b/googleapiclient/discovery_cache/documents/adsense.v2.json @@ -1844,7 +1844,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://adsense.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json b/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json index 66fea77240b..676fb989d39 100644 --- a/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json +++ b/googleapiclient/discovery_cache/documents/advisorynotifications.v1.json @@ -259,7 +259,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://advisorynotifications.googleapis.com/", "schemas": { "GoogleCloudAdvisorynotificationsV1Attachment": { diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index 3f6d6d942bd..1f04bc4239b 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -12998,7 +12998,7 @@ } } }, - "revision": "20231012", + "revision": "20231023", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -14711,7 +14711,7 @@ "type": "string" }, "protectedArtifactLocationId": { - "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations", + "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations", "type": "string" }, "reservedIpRanges": { @@ -26815,7 +26815,7 @@ "type": "object" }, "GoogleCloudAiplatformV1Study": { - "description": "A message representing a Study.", + "description": "A message representing a Study. 
Next id: 12", "id": "GoogleCloudAiplatformV1Study", "properties": { "createTime": { @@ -26933,6 +26933,10 @@ "$ref": "GoogleCloudAiplatformV1StudySpecParameterSpec" }, "type": "array" + }, + "studyStoppingConfig": { + "$ref": "GoogleCloudAiplatformV1StudySpecStudyStoppingConfig", + "description": "Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition." } }, "type": "object" @@ -27238,6 +27242,62 @@ }, "type": "object" }, + "GoogleCloudAiplatformV1StudySpecStudyStoppingConfig": { + "description": "The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection.", + "id": "GoogleCloudAiplatformV1StudySpecStudyStoppingConfig", + "properties": { + "maxDurationNoProgress": { + "description": "If the objective value has not improved for this much time, stop the study. WARNING: Effective only for single-objective studies.", + "format": "google-duration", + "type": "string" + }, + "maxNumTrials": { + "description": "If there are more than this many trials, stop the study.", + "format": "int32", + "type": "integer" + }, + "maxNumTrialsNoProgress": { + "description": "If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.", + "format": "int32", + "type": "integer" + }, + "maximumRuntimeConstraint": { + "$ref": "GoogleCloudAiplatformV1StudyTimeConstraint", + "description": "If the specified time or duration has passed, stop the study." + }, + "minNumTrials": { + "description": "If there are fewer than this many COMPLETED trials, do not stop the study.", + "format": "int32", + "type": "integer" + }, + "minimumRuntimeConstraint": { + "$ref": "GoogleCloudAiplatformV1StudyTimeConstraint", + "description": "Each \"stopping rule\" in this proto specifies an \"if\" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose \"if\" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study." + }, + "shouldStopAsap": { + "description": "If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. 
(Please see Study.State documentation for the source of truth).", + "type": "boolean" + } + }, + "type": "object" + }, + "GoogleCloudAiplatformV1StudyTimeConstraint": { + "description": "Time-based Constraint for Study", + "id": "GoogleCloudAiplatformV1StudyTimeConstraint", + "properties": { + "endTime": { + "description": "Compares the wallclock time to this time. Must use UTC timezone.", + "format": "google-datetime", + "type": "string" + }, + "maxDuration": { + "description": "Counts the wallclock time passed since the creation of this Study.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudAiplatformV1SuggestTrialsMetadata": { "description": "Details of operations that perform Trials suggestion.", "id": "GoogleCloudAiplatformV1SuggestTrialsMetadata", diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index 5daae0c9a32..c3353fa5998 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -16094,7 +16094,7 @@ } } }, - "revision": "20231012", + "revision": "20231023", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -17987,7 +17987,7 @@ "type": "string" }, "protectedArtifactLocationId": { - "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. For unprotected artifacts, the value of this field is ignored. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations", + "description": "The ID of the location to store protected artifacts. e.g. us-central1. Populate only when the location is different than CustomJob location. List of supported locations: https://cloud.google.com/vertex-ai/docs/general/locations", "type": "string" }, "reservedIpRanges": { @@ -31302,7 +31302,7 @@ "type": "object" }, "GoogleCloudAiplatformV1beta1Study": { - "description": "A message representing a Study.", + "description": "A message representing a Study. Next id: 12", "id": "GoogleCloudAiplatformV1beta1Study", "properties": { "createTime": { @@ -31426,6 +31426,10 @@ }, "type": "array" }, + "studyStoppingConfig": { + "$ref": "GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig", + "description": "Conditions for automated stopping of a Study. Enable automated stopping by configuring at least one condition." + }, "transferLearningConfig": { "$ref": "GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig", "description": "The configuration info/options for transfer learning. Currently supported for Vertex AI Vizier service, not HyperParameterTuningJob" @@ -31765,6 +31769,45 @@ }, "type": "object" }, + "GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig": { + "description": "The configuration (stopping conditions) for automated stopping of a Study. Conditions include trial budgets, time budgets, and convergence detection.", + "id": "GoogleCloudAiplatformV1beta1StudySpecStudyStoppingConfig", + "properties": { + "maxDurationNoProgress": { + "description": "If the objective value has not improved for this much time, stop the study. 
WARNING: Effective only for single-objective studies.", + "format": "google-duration", + "type": "string" + }, + "maxNumTrials": { + "description": "If there are more than this many trials, stop the study.", + "format": "int32", + "type": "integer" + }, + "maxNumTrialsNoProgress": { + "description": "If the objective value has not improved for this many consecutive trials, stop the study. WARNING: Effective only for single-objective studies.", + "format": "int32", + "type": "integer" + }, + "maximumRuntimeConstraint": { + "$ref": "GoogleCloudAiplatformV1beta1StudyTimeConstraint", + "description": "If the specified time or duration has passed, stop the study." + }, + "minNumTrials": { + "description": "If there are fewer than this many COMPLETED trials, do not stop the study.", + "format": "int32", + "type": "integer" + }, + "minimumRuntimeConstraint": { + "$ref": "GoogleCloudAiplatformV1beta1StudyTimeConstraint", + "description": "Each \"stopping rule\" in this proto specifies an \"if\" condition. Before Vizier would generate a new suggestion, it first checks each specified stopping rule, from top to bottom in this list. Note that the first few rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other stopping rules from being evaluated until they are met. For example, setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the Study will ONLY stop after it has 5 COMPLETED trials, even if more than an hour has passed since its creation. It follows the first applicable rule (whose \"if\" condition is satisfied) to make a stopping decision. If none of the specified rules are applicable, then Vizier decides that the study should not stop. If Vizier decides that the study should stop, the study enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). IMPORTANT: The automatic study state transition happens precisely as described above; that is, deleting trials or updating StudyConfig NEVER automatically moves the study state back to ACTIVE. If you want to _resume_ a Study that was stopped, 1) change the stopping conditions if necessary, 2) activate the study, and then 3) ask for suggestions. If the specified time or duration has not passed, do not stop the study." + }, + "shouldStopAsap": { + "description": "If true, a Study enters STOPPING_ASAP whenever it would normally enters STOPPING state. The bottom line is: set to true if you want to interrupt on-going evaluations of Trials as soon as the study stopping condition is met. (Please see Study.State documentation for the source of truth).", + "type": "boolean" + } + }, + "type": "object" + }, "GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig": { "description": "This contains flag for manually disabling transfer learning for a study. The names of prior studies being used for transfer learning (if any) are also listed here.", "id": "GoogleCloudAiplatformV1beta1StudySpecTransferLearningConfig", @@ -31784,6 +31827,23 @@ }, "type": "object" }, + "GoogleCloudAiplatformV1beta1StudyTimeConstraint": { + "description": "Time-based Constraint for Study", + "id": "GoogleCloudAiplatformV1beta1StudyTimeConstraint", + "properties": { + "endTime": { + "description": "Compares the wallclock time to this time. 
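In the generated client, the new StudySpecStudyStoppingConfig and StudyTimeConstraint messages are just nested dictionaries inside studySpec. A hedged sketch of creating a Vizier study that enables automated stopping (project, metric, and parameter names are placeholders, the surrounding studySpec fields come from the existing v1 surface rather than this diff, and regional endpoint configuration is omitted):

    from googleapiclient.discovery import build

    aiplatform = build("aiplatform", "v1")
    parent = "projects/my-project/locations/us-central1"  # placeholder

    study = {
        "displayName": "lr-sweep",  # placeholder
        "studySpec": {
            "algorithm": "ALGORITHM_UNSPECIFIED",
            "metrics": [{"metricId": "accuracy", "goal": "MAXIMIZE"}],
            "parameters": [{
                "parameterId": "learning_rate",
                "doubleValueSpec": {"minValue": 1e-4, "maxValue": 1e-1},
            }],
            # New in this revision: conditions for automated stopping. The min
            # rules are evaluated before the max rules, as described above.
            "studyStoppingConfig": {
                "minNumTrials": 5,
                "maxNumTrials": 100,
                "maxNumTrialsNoProgress": 20,  # single-objective studies only
                "maximumRuntimeConstraint": {"maxDuration": "86400s"},
                "shouldStopAsap": True,
            },
        },
    }
    created = (
        aiplatform.projects().locations().studies()
        .create(parent=parent, body=study)
        .execute()
    )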
Must use UTC timezone.", + "format": "google-datetime", + "type": "string" + }, + "maxDuration": { + "description": "Counts the wallclock time passed since the creation of this Study.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "GoogleCloudAiplatformV1beta1SuggestTrialsMetadata": { "description": "Details of operations that perform Trials suggestion.", "id": "GoogleCloudAiplatformV1beta1SuggestTrialsMetadata", diff --git a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json index ba65d789ae5..37c193a6982 100644 --- a/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/alertcenter.v1beta1.json @@ -423,7 +423,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://alertcenter.googleapis.com/", "schemas": { "AbuseDetected": { diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json index bd6abaf02d2..bb96032ddc3 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1alpha.json @@ -4298,7 +4298,7 @@ } } }, - "revision": "20231022", + "revision": "20231027", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1alphaAccessBetweenFilter": { @@ -5822,6 +5822,10 @@ "readOnly": true, "type": "boolean" }, + "defaultConversionValue": { + "$ref": "GoogleAnalyticsAdminV1alphaConversionEventDefaultConversionValue", + "description": "Optional. Defines a default value/currency for a conversion event." + }, "deletable": { "description": "Output only. If set, this event can currently be deleted with DeleteConversionEvent.", "readOnly": true, @@ -5839,6 +5843,22 @@ }, "type": "object" }, + "GoogleAnalyticsAdminV1alphaConversionEventDefaultConversionValue": { + "description": "Defines a default value/currency for a conversion event. Both value and currency must be provided.", + "id": "GoogleAnalyticsAdminV1alphaConversionEventDefaultConversionValue", + "properties": { + "currencyCode": { + "description": "When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. 
See https://en.wikipedia.org/wiki/ISO_4217 for more.", + "type": "string" + }, + "value": { + "description": "This value will be used to populate the value for all conversions of the specified event_name where the event \"value\" parameter is unset.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, "GoogleAnalyticsAdminV1alphaConversionValues": { "description": "Conversion value settings for a postback window for SKAdNetwork conversion value schema.", "id": "GoogleAnalyticsAdminV1alphaConversionValues", diff --git a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json index ec017476a58..1da19f4921e 100644 --- a/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json +++ b/googleapiclient/discovery_cache/documents/analyticsadmin.v1beta.json @@ -1628,7 +1628,7 @@ } } }, - "revision": "20231022", + "revision": "20231027", "rootUrl": "https://analyticsadmin.googleapis.com/", "schemas": { "GoogleAnalyticsAdminV1betaAccessBetweenFilter": { @@ -2241,6 +2241,10 @@ "readOnly": true, "type": "boolean" }, + "defaultConversionValue": { + "$ref": "GoogleAnalyticsAdminV1betaConversionEventDefaultConversionValue", + "description": "Optional. Defines a default value/currency for a conversion event." + }, "deletable": { "description": "Output only. If set, this event can currently be deleted with DeleteConversionEvent.", "readOnly": true, @@ -2258,6 +2262,22 @@ }, "type": "object" }, + "GoogleAnalyticsAdminV1betaConversionEventDefaultConversionValue": { + "description": "Defines a default value/currency for a conversion event. Both value and currency must be provided.", + "id": "GoogleAnalyticsAdminV1betaConversionEventDefaultConversionValue", + "properties": { + "currencyCode": { + "description": "When a conversion event for this event_name has no set currency, this currency will be applied as the default. Must be in ISO 4217 currency code format. 
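The defaultConversionValue message is added to both the v1alpha and v1beta ConversionEvent schemas, and as its description says, value and currencyCode have to be supplied together. A minimal sketch with the v1beta client (the property ID and event name are placeholders):

    from googleapiclient.discovery import build

    admin = build("analyticsadmin", "v1beta")

    event = {
        "eventName": "purchase",  # placeholder
        # Applied whenever a conversion for this event arrives without a value/currency.
        "defaultConversionValue": {"value": 9.99, "currencyCode": "USD"},
    }
    created = (
        admin.properties()
        .conversionEvents()
        .create(parent="properties/123456", body=event)  # placeholder property
        .execute()
    )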
See https://en.wikipedia.org/wiki/ISO_4217 for more.", + "type": "string" + }, + "value": { + "description": "This value will be used to populate the value for all conversions of the specified event_name where the event \"value\" parameter is unset.", + "format": "double", + "type": "number" + } + }, + "type": "object" + }, "GoogleAnalyticsAdminV1betaCustomDimension": { "description": "A definition for a CustomDimension.", "id": "GoogleAnalyticsAdminV1betaCustomDimension", diff --git a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json index 5e18068123a..9c6b9f4b044 100644 --- a/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json +++ b/googleapiclient/discovery_cache/documents/analyticsdata.v1beta.json @@ -313,7 +313,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://analyticsdata.googleapis.com/", "schemas": { "ActiveMetricRestriction": { diff --git a/googleapiclient/discovery_cache/documents/analyticshub.v1.json b/googleapiclient/discovery_cache/documents/analyticshub.v1.json index 186ef02214a..e192aa284a1 100644 --- a/googleapiclient/discovery_cache/documents/analyticshub.v1.json +++ b/googleapiclient/discovery_cache/documents/analyticshub.v1.json @@ -964,7 +964,7 @@ } } }, - "revision": "20231009", + "revision": "20231016", "rootUrl": "https://analyticshub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json b/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json index fec9b75f9fb..ee88c132293 100644 --- a/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/analyticshub.v1beta1.json @@ -695,7 +695,7 @@ } } }, - "revision": "20231009", + "revision": "20231016", "rootUrl": "https://analyticshub.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json index 095c368862c..01d69175f42 100644 --- a/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json +++ b/googleapiclient/discovery_cache/documents/androiddeviceprovisioning.v1.json @@ -851,13 +851,18 @@ } } }, - "revision": "20231021", + "revision": "20231029", "rootUrl": "https://androiddeviceprovisioning.googleapis.com/", "schemas": { "ClaimDeviceRequest": { "description": "Request message to claim a device on behalf of a customer.", "id": "ClaimDeviceRequest", "properties": { + "configurationId": { + "description": "Optional. The unique identifier of the configuration (internally known as profile) to set for the section.", + "format": "int64", + "type": "string" + }, "customerId": { "description": "The ID of the customer for whom the device is being claimed.", "format": "int64", @@ -894,7 +899,7 @@ "type": "string" }, "simlockProfileId": { - "description": "Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile.", + "description": "Optional. ", "format": "int64", "type": "string" } @@ -1709,6 +1714,11 @@ "description": "Identifies one claim request.", "id": "PartnerClaim", "properties": { + "configurationId": { + "description": "Optional. 
The unique identifier of the configuration (internally known as profile) to set for the section.", + "format": "int64", + "type": "string" + }, "customerId": { "description": "The ID of the customer for whom the device is being claimed.", "format": "int64", @@ -1745,7 +1755,7 @@ "type": "string" }, "simlockProfileId": { - "description": "Optional. Must and can only be set when DeviceProvisioningSectionType is SECTION_TYPE_SIM_LOCK. The unique identifier of the SimLock profile.", + "description": "Optional. ", "format": "int64", "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json index 547861b19d6..a5f50f95783 100644 --- a/googleapiclient/discovery_cache/documents/androidenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/androidenterprise.v1.json @@ -2649,7 +2649,7 @@ } } }, - "revision": "20231023", + "revision": "20231026", "rootUrl": "https://androidenterprise.googleapis.com/", "schemas": { "Administrator": { diff --git a/googleapiclient/discovery_cache/documents/androidmanagement.v1.json b/googleapiclient/discovery_cache/documents/androidmanagement.v1.json index 6d6c34433c5..32b72b6ead0 100644 --- a/googleapiclient/discovery_cache/documents/androidmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/androidmanagement.v1.json @@ -1095,7 +1095,7 @@ } } }, - "revision": "20231016", + "revision": "20231025", "rootUrl": "https://androidmanagement.googleapis.com/", "schemas": { "AdbShellCommandEvent": { @@ -1568,7 +1568,7 @@ ], "enumDescriptions": [ "Unspecified. Defaults to AUTO_UPDATE_DEFAULT.", - "The app is automatically updated with low priority to minimize the impact on the user.The app is updated when all of the following constraints are met: The device is not actively used. The device is connected to an unmetered network. The device is charging.The device is notified about a new update within 24 hours after it is published by the developer, after which the app is updated the next time the constraints above are met.", + "The default update mode.The app is automatically updated with low priority to minimize the impact on the user.The app is updated when all of the following constraints are met: The device is not actively used. The device is connected to an unmetered network. The device is charging. The app to be updated is not running in the foreground.The device is notified about a new update within 24 hours after it is published by the developer, after which the app is updated the next time the constraints above are met.", "The app is not automatically updated for a maximum of 90 days after the app becomes out of date.90 days after the app becomes out of date, the latest available version is installed automatically with low priority (see AUTO_UPDATE_DEFAULT). After the app is updated it is not automatically updated again until 90 days after it becomes out of date again.The user can still manually update the app from the Play Store at any time.", "The app is updated as soon as possible. No constraints are applied.The device is notified immediately about a new update after it becomes available." ], @@ -1657,7 +1657,7 @@ "enumDescriptions": [ "Unspecified. 
Defaults to AVAILABLE.", "The app is automatically installed and can be removed by the user.", - "The app is automatically installed and can't be removed by the user.", + "The app is automatically installed regardless of a set maintenance window and can't be removed by the user.", "The app is blocked and can't be installed. If the app was installed under a previous policy, it will be uninstalled. This also blocks its instant app functionality.", "The app is available to install.", "The app is automatically installed and can't be removed by the user and will prevent setup from completion until installation is complete.", @@ -2021,7 +2021,7 @@ "type": "string" }, "newPassword": { - "description": "For commands of type RESET_PASSWORD, optionally specifies the new password.", + "description": "For commands of type RESET_PASSWORD, optionally specifies the new password. Note: The new password must be at least 6 characters long if it is numeric in case of Android 14 devices. Else the command will fail with INVALID_VALUE.", "type": "string" }, "resetPasswordFlags": { @@ -3850,7 +3850,9 @@ "NOT_AVAILABLE_IN_COUNTRY", "NO_LICENSES_REMAINING", "NOT_ENROLLED", - "USER_INVALID" + "USER_INVALID", + "NETWORK_ERROR_UNRELIABLE_CONNECTION", + "INSUFFICIENT_STORAGE" ], "enumDescriptions": [ "This value is disallowed.", @@ -3863,7 +3865,9 @@ "The app is not available in the user's country.", "There are no licenses available to assign to the user.", "The enterprise is no longer enrolled with Managed Google Play or the admin has not accepted the latest Managed Google Play Terms of Service.", - "The user is no longer valid. The user may have been deleted or disabled." + "The user is no longer valid. The user may have been deleted or disabled.", + "A network error on the user's device has prevented the install from succeeding. This usually happens when the device's internet connectivity is degraded, unavailable or there's a network configuration issue. Please ensure the device has access to full internet connectivity on a network that meets Android Enterprise Network Requirements (https://support.google.com/work/android/answer/10513641). App install or update will automatically resume once this is the case.", + "The user's device does not have sufficient storage space to install the app. This can be resolved by clearing up storage space on the device. App install or update will automatically resume once the device has sufficient storage." ], "type": "string" }, @@ -5591,7 +5595,7 @@ "Follow the default update behavior for the device, which typically requires the user to accept system updates.", "Install automatically as soon as an update is available.", "Install automatically within a daily maintenance window. This also configures Play apps to be updated within the window. This is strongly recommended for kiosk devices because this is the only way apps persistently pinned to the foreground can be updated by Play.If autoUpdateMode is set to AUTO_UPDATE_HIGH_PRIORITY for an app, then the maintenance window is ignored for that app and it is updated as soon as possible even outside of the maintenance window.", - "Postpone automatic install up to a maximum of 30 days." + "Postpone automatic install up to a maximum of 30 days. This policy does not affect security updates (e.g. monthly security patches)." 
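The sharpened newPassword note is mainly a constraint for callers to enforce: on Android 14 devices a numeric password shorter than six characters makes RESET_PASSWORD fail with INVALID_VALUE. A sketch of issuing the command with the generated client, assuming the existing enterprises.devices.issueCommand method (enterprise and device names are placeholders):

    from googleapiclient.discovery import build

    amapi = build("androidmanagement", "v1")
    device = "enterprises/LC01234567/devices/3a1b2c3d4e5f"  # placeholder

    command = {
        "type": "RESET_PASSWORD",
        # Android 14: a numeric password must be at least 6 characters,
        # otherwise the command fails with INVALID_VALUE.
        "newPassword": "734926",
        "resetPasswordFlags": ["LOCK_NOW"],
    }
    operation = (
        amapi.enterprises()
        .devices()
        .issueCommand(name=device, body=command)
        .execute()
    )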
], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index 499cf7086ac..b699f4015c2 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -4047,7 +4047,7 @@ } } }, - "revision": "20231024", + "revision": "20231031", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Abi": { @@ -5413,8 +5413,7 @@ "CAN_MANAGE_ORDERS", "CAN_MANAGE_APP_CONTENT", "CAN_VIEW_NON_FINANCIAL_DATA", - "CAN_VIEW_APP_QUALITY", - "CAN_MANAGE_DEEPLINKS" + "CAN_VIEW_APP_QUALITY" ], "enumDeprecated": [ false, @@ -5430,7 +5429,6 @@ false, false, false, - false, false ], "enumDescriptions": [ @@ -5447,8 +5445,7 @@ "Manage orders and subscriptions.", "Manage policy related pages.", "View app information (read-only).", - "View app quality data such as Vitals, Crashes etc.", - "Manage the deep links setup of an app." + "View app quality data such as Vitals, Crashes etc." ], "type": "string" }, @@ -7762,8 +7759,7 @@ "CAN_MANAGE_ORDERS_GLOBAL", "CAN_MANAGE_APP_CONTENT_GLOBAL", "CAN_VIEW_NON_FINANCIAL_DATA_GLOBAL", - "CAN_VIEW_APP_QUALITY_GLOBAL", - "CAN_MANAGE_DEEPLINKS_GLOBAL" + "CAN_VIEW_APP_QUALITY_GLOBAL" ], "enumDescriptions": [ "Unknown or unspecified permission.", @@ -7783,8 +7779,7 @@ "Manage orders and subscriptions.", "Manage policy related pages on all apps for the developer.", "View app information and download bulk reports (read-only).", - "View app quality information for all apps for the developer.", - "Manage the deep links setup for all apps for the developer." + "View app quality information for all apps for the developer." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1.json b/googleapiclient/discovery_cache/documents/apigateway.v1.json index 0be3ab6c336..be840e0251c 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1.json @@ -1083,7 +1083,7 @@ } } }, - "revision": "20231010", + "revision": "20231018", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json index 96ab35e23af..5c7798dd714 100644 --- a/googleapiclient/discovery_cache/documents/apigateway.v1beta.json +++ b/googleapiclient/discovery_cache/documents/apigateway.v1beta.json @@ -1083,7 +1083,7 @@ } } }, - "revision": "20231010", + "revision": "20231018", "rootUrl": "https://apigateway.googleapis.com/", "schemas": { "ApigatewayApi": { diff --git a/googleapiclient/discovery_cache/documents/apikeys.v2.json b/googleapiclient/discovery_cache/documents/apikeys.v2.json index 60aaf9333cb..48bcafa1250 100644 --- a/googleapiclient/discovery_cache/documents/apikeys.v2.json +++ b/googleapiclient/discovery_cache/documents/apikeys.v2.json @@ -396,7 +396,7 @@ } } }, - "revision": "20231018", + "revision": "20231029", "rootUrl": "https://apikeys.googleapis.com/", "schemas": { "Operation": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json index 9cc272c0540..5eda718c29c 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1.json @@ -1610,7 +1610,7 @@ } } }, - "revision": 
"20231016", + "revision": "20231024", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -3829,6 +3829,14 @@ "$ref": "FlexibleRuntimeSettings", "description": "Settings for App Engine flexible runtimes." }, + "generatedCustomerMetadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Additional Google Generated Customer Metadata, this field won't be provided by default and can be requested by setting the IncludeExtraData field in GetVersionRequest", + "type": "object" + }, "handlers": { "description": "An ordered list of URL-matching patterns that should be applied to incoming requests. The first matching URL handles the request and other request handlers are not attempted.Only returned in GET requests if view=FULL is set.", "items": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json index ec0ed8d605d..c8cf7320f25 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json @@ -887,7 +887,7 @@ } } }, - "revision": "20231016", + "revision": "20231030", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "AuthorizedCertificate": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json index 7efc4fecd6a..b3ce8df8037 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json @@ -1859,7 +1859,7 @@ } } }, - "revision": "20231016", + "revision": "20231024", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json index 621274c70ce..d7a2091b5e4 100644 --- a/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/area120tables.v1alpha1.json @@ -586,7 +586,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://area120tables.googleapis.com/", "schemas": { "BatchCreateRowsRequest": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json index 86630fc5b3f..64c3dac4933 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1.json @@ -1713,7 +1713,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json index 76267d6118f..4739403b53f 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta1.json @@ -981,7 +981,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json 
index ca9ca45ae14..37217770546 100644 --- a/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/artifactregistry.v1beta2.json @@ -1187,7 +1187,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://artifactregistry.googleapis.com/", "schemas": { "AptArtifact": { diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json index 0af2b3c3b1d..e5c72c439fa 100644 --- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json +++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1.json @@ -566,7 +566,7 @@ } } }, - "revision": "20231017", + "revision": "20231023", "rootUrl": "https://assuredworkloads.googleapis.com/", "schemas": { "GoogleCloudAssuredworkloadsV1AcknowledgeViolationRequest": { @@ -1121,7 +1121,7 @@ "readOnly": true }, "compliantButDisallowedServices": { - "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.\"", + "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.", "items": { "type": "string" }, @@ -1140,7 +1140,8 @@ }, "ekmProvisioningResponse": { "$ref": "GoogleCloudAssuredworkloadsV1WorkloadEkmProvisioningResponse", - "description": "Optional. Represents the Ekm Provisioning State of the given workload." + "description": "Output only. Represents the Ekm Provisioning State of the given workload.", + "readOnly": true }, "enableSovereignControls": { "description": "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.", diff --git a/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json b/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json index 339913c7791..f71b1e8e966 100644 --- a/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/assuredworkloads.v1beta1.json @@ -563,7 +563,7 @@ } } }, - "revision": "20231017", + "revision": "20231023", "rootUrl": "https://assuredworkloads.googleapis.com/", "schemas": { "GoogleCloudAssuredworkloadsV1beta1AcknowledgeViolationRequest": { @@ -1167,7 +1167,7 @@ "readOnly": true }, "compliantButDisallowedServices": { - "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.\"", + "description": "Output only. Urls for services which are compliant for this Assured Workload, but which are currently disallowed by the ResourceUsageRestriction org policy. Invoke RestrictAllowedResources endpoint to allow your project developers to use these services in their environment.", "items": { "type": "string" }, @@ -1186,7 +1186,8 @@ }, "ekmProvisioningResponse": { "$ref": "GoogleCloudAssuredworkloadsV1beta1WorkloadEkmProvisioningResponse", - "description": "Optional. 
Represents the Ekm Provisioning State of the given workload." + "description": "Output only. Represents the Ekm Provisioning State of the given workload.", + "readOnly": true }, "enableSovereignControls": { "description": "Optional. Indicates the sovereignty status of the given workload. Currently meant to be used by Europe/Canada customers.", diff --git a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json index a1eeea94c2c..55373890f9b 100644 --- a/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json +++ b/googleapiclient/discovery_cache/documents/authorizedbuyersmarketplace.v1.json @@ -1307,7 +1307,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://authorizedbuyersmarketplace.googleapis.com/", "schemas": { "AcceptProposalRequest": { diff --git a/googleapiclient/discovery_cache/documents/backupdr.v1.json b/googleapiclient/discovery_cache/documents/backupdr.v1.json index 812634af62e..48f6c780fc9 100644 --- a/googleapiclient/discovery_cache/documents/backupdr.v1.json +++ b/googleapiclient/discovery_cache/documents/backupdr.v1.json @@ -535,7 +535,7 @@ } } }, - "revision": "20231008", + "revision": "20231013", "rootUrl": "https://backupdr.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json index edda7046fcb..b2c87f79af7 100644 --- a/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json +++ b/googleapiclient/discovery_cache/documents/baremetalsolution.v2.json @@ -1613,7 +1613,7 @@ } } }, - "revision": "20231018", + "revision": "20231023", "rootUrl": "https://baremetalsolution.googleapis.com/", "schemas": { "AllowedClient": { diff --git a/googleapiclient/discovery_cache/documents/batch.v1.json b/googleapiclient/discovery_cache/documents/batch.v1.json index 0de4d032afb..3ab620498f5 100644 --- a/googleapiclient/discovery_cache/documents/batch.v1.json +++ b/googleapiclient/discovery_cache/documents/batch.v1.json @@ -561,7 +561,7 @@ } } }, - "revision": "20231009", + "revision": "20231018", "rootUrl": "https://batch.googleapis.com/", "schemas": { "Accelerator": { @@ -1835,6 +1835,10 @@ "$ref": "AgentTask" }, "type": "array" + }, + "useBatchMonitoredResource": { + "description": "If true, the cloud logging for batch agent will use batch.googleapis.com/Job as monitored resource for Batch job related logging.", + "type": "boolean" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/biglake.v1.json b/googleapiclient/discovery_cache/documents/biglake.v1.json new file mode 100644 index 00000000000..fdd496eb420 --- /dev/null +++ b/googleapiclient/discovery_cache/documents/biglake.v1.json @@ -0,0 +1,910 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/bigquery": { + "description": "View and manage your data in Google BigQuery and see the email address for your Google Account" + }, + "https://www.googleapis.com/auth/cloud-platform": { + "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account." 
+ } + } + } + }, + "basePath": "", + "baseUrl": "https://biglake.googleapis.com/", + "batchPath": "batch", + "canonicalName": "BigLake Service", + "description": "The BigLake API provides access to BigLake Metastore, a serverless, fully managed, and highly available metastore for open-source data that can be used for querying Apache Iceberg tables in BigQuery.", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/bigquery/", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "biglake:v1", + "kind": "discovery#restDescription", + "mtlsRootUrl": "https://biglake.mtls.googleapis.com/", + "name": "biglake", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "resources": { + "locations": { + "resources": { + "catalogs": { + "methods": { + "create": { + "description": "Creates a new catalog.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs", + "httpMethod": "POST", + "id": "biglake.projects.locations.catalogs.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "catalogId": { + "description": "Required. The ID to use for the catalog, which will become the final component of the catalog's resource name.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource where this catalog will be created. 
Format: projects/{project_id_or_number}/locations/{location_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/catalogs", + "request": { + "$ref": "Catalog" + }, + "response": { + "$ref": "Catalog" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes an existing catalog specified by the catalog ID.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}", + "httpMethod": "DELETE", + "id": "biglake.projects.locations.catalogs.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the catalog to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Catalog" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the catalog specified by the resource name.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}", + "httpMethod": "GET", + "id": "biglake.projects.locations.catalogs.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the catalog to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Catalog" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List all catalogs in a specified project.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs", + "httpMethod": "GET", + "id": "biglake.projects.locations.catalogs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of catalogs to return. The service may return fewer than this value. If unspecified, at most 50 catalogs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous `ListCatalogs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListCatalogs` must match the call that provided the page token.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent, which owns this collection of catalogs. 
Format: projects/{project_id_or_number}/locations/{location_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/catalogs", + "response": { + "$ref": "ListCatalogsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "databases": { + "methods": { + "create": { + "description": "Creates a new database.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases", + "httpMethod": "POST", + "id": "biglake.projects.locations.catalogs.databases.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "databaseId": { + "description": "Required. The ID to use for the database, which will become the final component of the database's resource name.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent resource where this database will be created. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/databases", + "request": { + "$ref": "Database" + }, + "response": { + "$ref": "Database" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes an existing database specified by the database ID.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}", + "httpMethod": "DELETE", + "id": "biglake.projects.locations.catalogs.databases.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the database to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Database" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the database specified by the resource name.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}", + "httpMethod": "GET", + "id": "biglake.projects.locations.catalogs.databases.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the database to retrieve. 
Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Database" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List all databases in a specified catalog.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases", + "httpMethod": "GET", + "id": "biglake.projects.locations.catalogs.databases.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of databases to return. The service may return fewer than this value. If unspecified, at most 50 databases will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous `ListDatabases` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDatabases` must match the call that provided the page token.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent, which owns this collection of databases. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/databases", + "response": { + "$ref": "ListDatabasesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates an existing database specified by the database ID.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}", + "httpMethod": "PATCH", + "id": "biglake.projects.locations.catalogs.databases.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to update. 
For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "Database" + }, + "response": { + "$ref": "Database" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "tables": { + "methods": { + "create": { + "description": "Creates a new table.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}/tables", + "httpMethod": "POST", + "id": "biglake.projects.locations.catalogs.databases.tables.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The parent resource where this table will be created. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + }, + "tableId": { + "description": "Required. The ID to use for the table, which will become the final component of the table's resource name.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/tables", + "request": { + "$ref": "Table" + }, + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes an existing table specified by the table ID.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}/tables/{tablesId}", + "httpMethod": "DELETE", + "id": "biglake.projects.locations.catalogs.databases.tables.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the table to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+/tables/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the table specified by the resource name.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}/tables/{tablesId}", + "httpMethod": "GET", + "id": "biglake.projects.locations.catalogs.databases.tables.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the table to retrieve. 
Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+/tables/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "List all tables in a specified database.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}/tables", + "httpMethod": "GET", + "id": "biglake.projects.locations.catalogs.databases.tables.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "The maximum number of tables to return. The service may return fewer than this value. If unspecified, at most 50 tables will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A page token, received from a previous `ListTables` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListTables` must match the call that provided the page token.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The parent, which owns this collection of tables. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+$", + "required": true, + "type": "string" + }, + "view": { + "description": "The view for the returned tables.", + "enum": [ + "TABLE_VIEW_UNSPECIFIED", + "BASIC", + "FULL" + ], + "enumDescriptions": [ + "Default value. The API will default to the BASIC view.", + "Include only table names. This is the default value.", + "Include everything." + ], + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/tables", + "response": { + "$ref": "ListTablesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates an existing table specified by the table ID.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}/tables/{tablesId}", + "httpMethod": "PATCH", + "id": "biglake.projects.locations.catalogs.databases.tables.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+/tables/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "The list of fields to update. 
For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "Table" + }, + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "rename": { + "description": "Renames an existing table specified by the table ID.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/catalogs/{catalogsId}/databases/{databasesId}/tables/{tablesId}:rename", + "httpMethod": "POST", + "id": "biglake.projects.locations.catalogs.databases.tables.rename", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The table's `name` field is used to identify the table to rename. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/catalogs/[^/]+/databases/[^/]+/tables/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:rename", + "request": { + "$ref": "RenameTableRequest" + }, + "response": { + "$ref": "Table" + }, + "scopes": [ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + } + } + } + } + } + }, + "revision": "20231023", + "rootUrl": "https://biglake.googleapis.com/", + "schemas": { + "Catalog": { + "description": "Catalog is the container of databases.", + "id": "Catalog", + "properties": { + "createTime": { + "description": "Output only. The creation time of the catalog.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "deleteTime": { + "description": "Output only. The deletion time of the catalog. Only set after the catalog is deleted.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "expireTime": { + "description": "Output only. The time when this catalog is considered expired. Only set after the catalog is deleted.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "name": { + "description": "Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}", + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "Output only. The last modification time of the catalog.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "Database": { + "description": "Database is the container of tables.", + "id": "Database", + "properties": { + "createTime": { + "description": "Output only. The creation time of the database.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "deleteTime": { + "description": "Output only. The deletion time of the database. Only set after the database is deleted.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "expireTime": { + "description": "Output only. The time when this database is considered expired. Only set after the database is deleted.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "hiveOptions": { + "$ref": "HiveDatabaseOptions", + "description": "Options of a Hive database." 
+ }, + "name": { + "description": "Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}", + "readOnly": true, + "type": "string" + }, + "type": { + "description": "The database type.", + "enum": [ + "TYPE_UNSPECIFIED", + "HIVE" + ], + "enumDescriptions": [ + "The type is not specified.", + "Represents a database storing tables compatible with Hive Metastore tables." + ], + "type": "string" + }, + "updateTime": { + "description": "Output only. The last modification time of the database.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "HiveDatabaseOptions": { + "description": "Options of a Hive database.", + "id": "HiveDatabaseOptions", + "properties": { + "locationUri": { + "description": "Cloud Storage folder URI where the database data is stored, starting with \"gs://\".", + "type": "string" + }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "Stores user supplied Hive database parameters.", + "type": "object" + } + }, + "type": "object" + }, + "HiveTableOptions": { + "description": "Options of a Hive table.", + "id": "HiveTableOptions", + "properties": { + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "Stores user supplied Hive table parameters.", + "type": "object" + }, + "storageDescriptor": { + "$ref": "StorageDescriptor", + "description": "Stores physical storage information of the data." + }, + "tableType": { + "description": "Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE.", + "type": "string" + } + }, + "type": "object" + }, + "ListCatalogsResponse": { + "description": "Response message for the ListCatalogs method.", + "id": "ListCatalogsResponse", + "properties": { + "catalogs": { + "description": "The catalogs from the specified project.", + "items": { + "$ref": "Catalog" + }, + "type": "array" + }, + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + } + }, + "type": "object" + }, + "ListDatabasesResponse": { + "description": "Response message for the ListDatabases method.", + "id": "ListDatabasesResponse", + "properties": { + "databases": { + "description": "The databases from the specified catalog.", + "items": { + "$ref": "Database" + }, + "type": "array" + }, + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + } + }, + "type": "object" + }, + "ListTablesResponse": { + "description": "Response message for the ListTables method.", + "id": "ListTablesResponse", + "properties": { + "nextPageToken": { + "description": "A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.", + "type": "string" + }, + "tables": { + "description": "The tables from the specified database.", + "items": { + "$ref": "Table" + }, + "type": "array" + } + }, + "type": "object" + }, + "RenameTableRequest": { + "description": "Request message for the RenameTable method in MetastoreService", + "id": "RenameTableRequest", + "properties": { + "newName": { + "description": "Required. The new `name` for the specified table, must be in the same database. 
Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}", + "type": "string" + } + }, + "type": "object" + }, + "SerDeInfo": { + "description": "Serializer and deserializer information.", + "id": "SerDeInfo", + "properties": { + "serializationLib": { + "description": "The fully qualified Java class name of the serialization library.", + "type": "string" + } + }, + "type": "object" + }, + "StorageDescriptor": { + "description": "Stores physical storage information of the data.", + "id": "StorageDescriptor", + "properties": { + "inputFormat": { + "description": "The fully qualified Java class name of the input format.", + "type": "string" + }, + "locationUri": { + "description": "Cloud Storage folder URI where the table data is stored, starting with \"gs://\".", + "type": "string" + }, + "outputFormat": { + "description": "The fully qualified Java class name of the output format.", + "type": "string" + }, + "serdeInfo": { + "$ref": "SerDeInfo", + "description": "Serializer and deserializer information." + } + }, + "type": "object" + }, + "Table": { + "description": "Represents a table.", + "id": "Table", + "properties": { + "createTime": { + "description": "Output only. The creation time of the table.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "deleteTime": { + "description": "Output only. The deletion time of the table. Only set after the table is deleted.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "etag": { + "description": "The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations.", + "type": "string" + }, + "expireTime": { + "description": "Output only. The time when this table is considered expired. Only set after the table is deleted.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "hiveOptions": { + "$ref": "HiveTableOptions", + "description": "Options of a Hive table." + }, + "name": { + "description": "Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id}", + "readOnly": true, + "type": "string" + }, + "type": { + "description": "The table type.", + "enum": [ + "TYPE_UNSPECIFIED", + "HIVE" + ], + "enumDescriptions": [ + "The type is not specified.", + "Represents a table compatible with Hive Metastore tables." + ], + "type": "string" + }, + "updateTime": { + "description": "Output only. 
The last modification time of the table.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "BigLake API", + "version": "v1", + "version_module": true +} \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/bigquery.v2.json b/googleapiclient/discovery_cache/documents/bigquery.v2.json index cf6eebeacee..13a53472193 100644 --- a/googleapiclient/discovery_cache/documents/bigquery.v2.json +++ b/googleapiclient/discovery_cache/documents/bigquery.v2.json @@ -1686,7 +1686,7 @@ } } }, - "revision": "20231012", + "revision": "20231021", "rootUrl": "https://bigquery.googleapis.com/", "schemas": { "AggregateClassificationMetrics": { diff --git a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json index 8ad5c39a74f..eb0468c9b6a 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryconnection.v1beta1.json @@ -395,7 +395,7 @@ } } }, - "revision": "20231013", + "revision": "20231021", "rootUrl": "https://bigqueryconnection.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json index a40f47fe919..ccf52d50c73 100644 --- a/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/bigquerydatatransfer.v1.json @@ -1342,7 +1342,7 @@ } } }, - "revision": "20231017", + "revision": "20231021", "rootUrl": "https://bigquerydatatransfer.googleapis.com/", "schemas": { "CheckValidCredsRequest": { diff --git a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json index 816bfc3ffcf..0a995548b1a 100644 --- a/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json +++ b/googleapiclient/discovery_cache/documents/bigqueryreservation.v1.json @@ -831,7 +831,7 @@ } } }, - "revision": "20231015", + "revision": "20231026", "rootUrl": "https://bigqueryreservation.googleapis.com/", "schemas": { "Assignment": { diff --git a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json index 8f947b7cd3f..84a55517275 100644 --- a/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json +++ b/googleapiclient/discovery_cache/documents/bigtableadmin.v2.json @@ -1966,7 +1966,7 @@ } } }, - "revision": "20231013", + "revision": "20231024", "rootUrl": "https://bigtableadmin.googleapis.com/", "schemas": { "AppProfile": { diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json index 2f8c851e0ee..99891ec2379 100644 --- a/googleapiclient/discovery_cache/documents/billingbudgets.v1.json +++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1.json @@ -275,7 +275,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://billingbudgets.googleapis.com/", "schemas": { "GoogleCloudBillingBudgetsV1Budget": { diff --git a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json index 992ee0593c9..a7c6377f369 100644 --- 
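The BigLake v1 discovery document added above defines `projects.locations.catalogs`, `...catalogs.databases`, and `...databases.tables` resources with create/get/list/patch/delete (and table rename) methods. A minimal, hedged sketch of how the dynamically generated Python client surfaces the list methods declared there; "my-project" and "us" are placeholder identifiers, and credentials are assumed to come from Application Default Credentials:

```python
# Hedged usage sketch for the new BigLake v1 surface; "my-project" and "us"
# are placeholder identifiers, and credentials are resolved by build() via
# Application Default Credentials.
from googleapiclient.discovery import build

biglake = build("biglake", "v1")

parent = "projects/my-project/locations/us"

# biglake.projects.locations.catalogs.list -> GET v1/{+parent}/catalogs
catalogs = biglake.projects().locations().catalogs().list(
    parent=parent, pageSize=50
).execute()

for catalog in catalogs.get("catalogs", []):
    # biglake.projects.locations.catalogs.databases.list
    # -> GET v1/{+parent}/databases (returns ListDatabasesResponse)
    databases = biglake.projects().locations().catalogs().databases().list(
        parent=catalog["name"]
    ).execute()
    for database in databases.get("databases", []):
        print(database["name"], database.get("type"))
```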
a/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/billingbudgets.v1beta1.json @@ -269,7 +269,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://billingbudgets.googleapis.com/", "schemas": { "GoogleCloudBillingBudgetsV1beta1AllUpdatesRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json index c4c3e4ff842..0ad41eaf802 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1.json @@ -706,7 +706,7 @@ } } }, - "revision": "20231020", + "revision": "20231027", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json index a3c951596eb..b02824e393c 100644 --- a/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/binaryauthorization.v1beta1.json @@ -551,7 +551,7 @@ } } }, - "revision": "20231020", + "revision": "20231027", "rootUrl": "https://binaryauthorization.googleapis.com/", "schemas": { "AdmissionRule": { diff --git a/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json b/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json index 39c45669df2..cd33a852498 100644 --- a/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json +++ b/googleapiclient/discovery_cache/documents/blockchainnodeengine.v1.json @@ -487,7 +487,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://blockchainnodeengine.googleapis.com/", "schemas": { "BlockchainNode": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v2.json b/googleapiclient/discovery_cache/documents/blogger.v2.json index f091d74fc5b..4835eba377f 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v2.json +++ b/googleapiclient/discovery_cache/documents/blogger.v2.json @@ -401,7 +401,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/blogger.v3.json b/googleapiclient/discovery_cache/documents/blogger.v3.json index 278dd78a34f..3af5afe76cd 100644 --- a/googleapiclient/discovery_cache/documents/blogger.v3.json +++ b/googleapiclient/discovery_cache/documents/blogger.v3.json @@ -1710,7 +1710,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://blogger.googleapis.com/", "schemas": { "Blog": { diff --git a/googleapiclient/discovery_cache/documents/books.v1.json b/googleapiclient/discovery_cache/documents/books.v1.json index 3a6de89e973..e7433d915a8 100644 --- a/googleapiclient/discovery_cache/documents/books.v1.json +++ b/googleapiclient/discovery_cache/documents/books.v1.json @@ -2672,7 +2672,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://books.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json index 14cac082b48..b6db949c53b 100644 --- a/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json +++ 
b/googleapiclient/discovery_cache/documents/businessprofileperformance.v1.json @@ -417,7 +417,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://businessprofileperformance.googleapis.com/", "schemas": { "DailyMetricTimeSeries": { diff --git a/googleapiclient/discovery_cache/documents/calendar.v3.json b/googleapiclient/discovery_cache/documents/calendar.v3.json index 86d031cecb7..be4f3e86016 100644 --- a/googleapiclient/discovery_cache/documents/calendar.v3.json +++ b/googleapiclient/discovery_cache/documents/calendar.v3.json @@ -1735,7 +1735,7 @@ } } }, - "revision": "20230929", + "revision": "20231020", "rootUrl": "https://www.googleapis.com/", "schemas": { "Acl": { diff --git a/googleapiclient/discovery_cache/documents/chat.v1.json b/googleapiclient/discovery_cache/documents/chat.v1.json index 33948ac5d23..5c71f4b9a5d 100644 --- a/googleapiclient/discovery_cache/documents/chat.v1.json +++ b/googleapiclient/discovery_cache/documents/chat.v1.json @@ -957,7 +957,7 @@ } } }, - "revision": "20231019", + "revision": "20231026", "rootUrl": "https://chat.googleapis.com/", "schemas": { "ActionParameter": { diff --git a/googleapiclient/discovery_cache/documents/checks.v1alpha.json b/googleapiclient/discovery_cache/documents/checks.v1alpha.json index ea4be0d6667..f3f99623ff4 100644 --- a/googleapiclient/discovery_cache/documents/checks.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/checks.v1alpha.json @@ -414,7 +414,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://checks.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json index 3d0f47c5e23..a9e96fef056 100644 --- a/googleapiclient/discovery_cache/documents/chromemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/chromemanagement.v1.json @@ -1040,7 +1040,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://chromemanagement.googleapis.com/", "schemas": { "GoogleChromeManagementV1AndroidAppInfo": { diff --git a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json index 2c64580e301..940ed31eeb2 100644 --- a/googleapiclient/discovery_cache/documents/chromepolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/chromepolicy.v1.json @@ -557,7 +557,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://chromepolicy.googleapis.com/", "schemas": { "GoogleChromePolicyVersionsV1AdditionalTargetKeyName": { diff --git a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json index 0fe0840686e..71f74987915 100644 --- a/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json +++ b/googleapiclient/discovery_cache/documents/chromeuxreport.v1.json @@ -131,7 +131,7 @@ } } }, - "revision": "20231019", + "revision": "20231026", "rootUrl": "https://chromeuxreport.googleapis.com/", "schemas": { "Bin": { diff --git a/googleapiclient/discovery_cache/documents/classroom.v1.json b/googleapiclient/discovery_cache/documents/classroom.v1.json index 5700916cb99..1240de7eeb0 100644 --- a/googleapiclient/discovery_cache/documents/classroom.v1.json +++ b/googleapiclient/discovery_cache/documents/classroom.v1.json @@ -2400,7 +2400,7 @@ } } }, - "revision": "20231017", + "revision": "20231022", "rootUrl": 
"https://classroom.googleapis.com/", "schemas": { "Announcement": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1.json index 4933daf4de7..e95109dfc15 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1.json @@ -1095,7 +1095,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AccessSelector": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json index 3afad6633b7..111699b02de 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1beta1.json @@ -411,7 +411,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json index 26d19b8c079..7aeb6ff8dec 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p1beta1.json @@ -207,7 +207,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json index 172af7f7125..c88ae925073 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p5beta1.json @@ -177,7 +177,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json index 6956955735e..a6fce18b278 100644 --- a/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudasset.v1p7beta1.json @@ -167,7 +167,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudasset.googleapis.com/", "schemas": { "AnalyzeIamPolicyLongrunningMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json index 843e45f4c9d..cc825aeec27 100644 --- a/googleapiclient/discovery_cache/documents/cloudbilling.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1.json @@ -119,7 +119,13 @@ "httpMethod": "POST", "id": "cloudbilling.billingAccounts.create", "parameterOrder": [], - "parameters": {}, + "parameters": { + "parent": { + "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "query", + "type": "string" + } + }, "path": "v1/billingAccounts", "request": { "$ref": "BillingAccount" @@ -214,6 +220,11 @@ "description": "A token identifying a page of results to return. 
This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", "location": "query", "type": "string" + }, + "parent": { + "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "query", + "type": "string" } }, "path": "v1/billingAccounts", @@ -226,6 +237,35 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "move": { + "description": "Changes which parent organization a billing account belongs to.", + "flatPath": "v1/billingAccounts/{billingAccountsId}:move", + "httpMethod": "POST", + "id": "cloudbilling.billingAccounts.move", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account.", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:move", + "request": { + "$ref": "MoveBillingAccountRequest" + }, + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "patch": { "description": "Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.", "flatPath": "v1/billingAccounts/{billingAccountsId}", @@ -363,6 +403,196 @@ ] } } + }, + "subAccounts": { + "methods": { + "create": { + "description": "This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.", + "flatPath": "v1/billingAccounts/{billingAccountsId}/subAccounts", + "httpMethod": "POST", + "id": "cloudbilling.billingAccounts.subAccounts.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Optional. The parent to create a billing account from. 
Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/subAccounts", + "request": { + "$ref": "BillingAccount" + }, + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).", + "flatPath": "v1/billingAccounts/{billingAccountsId}/subAccounts", + "httpMethod": "GET", + "id": "cloudbilling.billingAccounts.subAccounts.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Requested page size. The maximum page size is 100; this is also the default.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/subAccounts", + "response": { + "$ref": "ListBillingAccountsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "organizations": { + "resources": { + "billingAccounts": { + "methods": { + "create": { + "description": "This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). 
This method will return an error if the parent account has not been provisioned for subaccounts.", + "flatPath": "v1/organizations/{organizationsId}/billingAccounts", + "httpMethod": "POST", + "id": "cloudbilling.organizations.billingAccounts.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/billingAccounts", + "request": { + "$ref": "BillingAccount" + }, + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).", + "flatPath": "v1/organizations/{organizationsId}/billingAccounts", + "httpMethod": "GET", + "id": "cloudbilling.organizations.billingAccounts.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. \"master_billing_account=billingAccounts/012345-678901-ABCDEF\"). Boolean algebra and other fields are not currently supported.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Requested page size. The maximum page size is 100; this is also the default.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF`", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/billingAccounts", + "response": { + "$ref": "ListBillingAccountsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-billing.readonly", + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "move": { + "description": "Changes which parent organization a billing account belongs to.", + "flatPath": "v1/organizations/{organizationsId}/billingAccounts/{billingAccountsId}:move", + "httpMethod": "GET", + "id": "cloudbilling.organizations.billingAccounts.move", + "parameterOrder": [ + "destinationParent", + "name" + ], + "parameters": { + "destinationParent": { + "description": "Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`.", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + }, + "name": { + "description": "Required. The resource name of the billing account to move. 
Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account.", + "location": "path", + "pattern": "^billingAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+destinationParent}/{+name}:move", + "response": { + "$ref": "BillingAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-billing", + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } } } }, @@ -521,7 +751,7 @@ } } }, - "revision": "20231020", + "revision": "20231025", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "AggregationInfo": { @@ -826,6 +1056,17 @@ }, "type": "object" }, + "MoveBillingAccountRequest": { + "description": "Request message for `MoveBillingAccount` RPC.", + "id": "MoveBillingAccountRequest", + "properties": { + "destinationParent": { + "description": "Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`.", + "type": "string" + } + }, + "type": "object" + }, "Policy": { "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
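The Cloud Billing v1 changes above add `billingAccounts.subAccounts`, `organizations.billingAccounts`, and a `move` method that reparents a billing account via a `MoveBillingAccountRequest`. A hedged sketch of calling the new methods with the generated Python client; the billing account and organization IDs are placeholders:

```python
# Hedged usage sketch for the new subaccount and move methods; the billing
# account and organization IDs below are placeholders.
from googleapiclient.discovery import build

billing = build("cloudbilling", "v1")

parent = "billingAccounts/012345-567890-ABCDEF"

# cloudbilling.billingAccounts.subAccounts.list -> GET v1/{+parent}/subAccounts
subaccounts = billing.billingAccounts().subAccounts().list(parent=parent).execute()
for account in subaccounts.get("billingAccounts", []):
    print(account["name"], account.get("displayName"))

# cloudbilling.billingAccounts.move -> POST v1/{+name}:move with a
# MoveBillingAccountRequest body; the named account must not be a subaccount.
moved = billing.billingAccounts().move(
    name=parent,
    body={"destinationParent": "organizations/12345678"},
).execute()
print(moved["name"])
```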
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time < timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", diff --git a/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json b/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json index 8183e2a1889..55a985a975e 100644 --- a/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json +++ b/googleapiclient/discovery_cache/documents/cloudbilling.v1beta.json @@ -664,7 +664,7 @@ } } }, - "revision": "20231020", + "revision": "20231025", "rootUrl": "https://cloudbilling.googleapis.com/", "schemas": { "CacheFillRegions": { diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json index 1daafe9f791..ac881cb2e86 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v1.json @@ -2319,7 +2319,7 @@ } } }, - "revision": "20231017", + "revision": "20231019", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "ApprovalConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json index ea94b971355..dc24cfe5666 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json @@ -844,7 +844,7 @@ } } }, - "revision": "20231017", + "revision": "20231019", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json index c1a66c2cacd..9e972ce2bfd 100644 --- a/googleapiclient/discovery_cache/documents/cloudchannel.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudchannel.v1.json @@ -2178,7 +2178,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://cloudchannel.googleapis.com/", "schemas": { "GoogleCloudChannelV1ActivateEntitlementRequest": { diff --git a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json index 78c57365296..55fda9edc13 100644 --- a/googleapiclient/discovery_cache/documents/clouddeploy.v1.json +++ b/googleapiclient/discovery_cache/documents/clouddeploy.v1.json @@ -1479,7 +1479,7 @@ } } }, - "revision": "20231015", + "revision": "20231018", 
"rootUrl": "https://clouddeploy.googleapis.com/", "schemas": { "AbandonReleaseRequest": { diff --git a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json index cc708d65608..5b792e8ce49 100644 --- a/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/clouderrorreporting.v1beta1.json @@ -430,7 +430,7 @@ } } }, - "revision": "20231013", + "revision": "20231018", "rootUrl": "https://clouderrorreporting.googleapis.com/", "schemas": { "DeleteEventsResponse": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json index 8643c3ed044..df36d80ec73 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v1.json @@ -552,7 +552,7 @@ } } }, - "revision": "20231012", + "revision": "20231023", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json index 77e69d9f74a..75fe5c6d28b 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v2.json @@ -571,7 +571,7 @@ } } }, - "revision": "20231012", + "revision": "20231023", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json index bd21e25c137..bf2074ed802 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v2alpha.json @@ -571,7 +571,7 @@ } } }, - "revision": "20231012", + "revision": "20231023", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json b/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json index 7886103b903..43f135fd2c7 100644 --- a/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json +++ b/googleapiclient/discovery_cache/documents/cloudfunctions.v2beta.json @@ -571,7 +571,7 @@ } } }, - "revision": "20231012", + "revision": "20231023", "rootUrl": "https://cloudfunctions.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json index 94e229587a3..941bda63342 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1.json @@ -1990,7 +1990,7 @@ } } }, - "revision": "20231010", + "revision": "20231024", "rootUrl": "https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json index 1e90d8ecdc5..148ee9c1c93 100644 --- a/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudidentity.v1beta1.json @@ -2015,7 +2015,7 @@ } } }, - "revision": "20231010", + "revision": "20231024", "rootUrl": 
"https://cloudidentity.googleapis.com/", "schemas": { "AddIdpCredentialOperationMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudkms.v1.json b/googleapiclient/discovery_cache/documents/cloudkms.v1.json index 22bbe9d01dc..e03061a8119 100644 --- a/googleapiclient/discovery_cache/documents/cloudkms.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudkms.v1.json @@ -1828,7 +1828,7 @@ } } }, - "revision": "20231012", + "revision": "20231025", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AsymmetricDecryptRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json index 0ad9631edd7..24e97bf1d1c 100644 --- a/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudprofiler.v2.json @@ -216,7 +216,7 @@ } } }, - "revision": "20231009", + "revision": "20231023", "rootUrl": "https://cloudprofiler.googleapis.com/", "schemas": { "CreateProfileRequest": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json index 89ed1cdb628..bba1f274994 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1.json @@ -1171,7 +1171,7 @@ } } }, - "revision": "20231022", + "revision": "20231025", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json index 4c7424bce16..5dc605db18b 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v1beta1.json @@ -568,7 +568,7 @@ } } }, - "revision": "20231022", + "revision": "20231025", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json index 24a4bc3a573..fef863c22c3 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2.json @@ -450,7 +450,7 @@ } } }, - "revision": "20231022", + "revision": "20231025", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json index b7516f85b2a..1a80c4c4c76 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v2beta1.json @@ -450,7 +450,7 @@ } } }, - "revision": "20231022", + "revision": "20231025", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json index 6f80d112a0a..c605085b871 100644 --- a/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json +++ b/googleapiclient/discovery_cache/documents/cloudresourcemanager.v3.json @@ -1805,7 +1805,7 @@ } } }, - "revision": 
"20231022", + "revision": "20231025", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json index 7b6797647d8..3a5e340fab5 100644 --- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1.json @@ -418,7 +418,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudscheduler.googleapis.com/", "schemas": { "AppEngineHttpTarget": { diff --git a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json index abb52e8a1ca..9a5b28b9614 100644 --- a/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/cloudscheduler.v1beta1.json @@ -433,7 +433,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://cloudscheduler.googleapis.com/", "schemas": { "AppEngineHttpTarget": { diff --git a/googleapiclient/discovery_cache/documents/cloudsearch.v1.json b/googleapiclient/discovery_cache/documents/cloudsearch.v1.json index 22a42e751df..2d1a6c568da 100644 --- a/googleapiclient/discovery_cache/documents/cloudsearch.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudsearch.v1.json @@ -2096,7 +2096,7 @@ } } }, - "revision": "20231004", + "revision": "20231025", "rootUrl": "https://cloudsearch.googleapis.com/", "schemas": { "Action": { diff --git a/googleapiclient/discovery_cache/documents/cloudshell.v1.json b/googleapiclient/discovery_cache/documents/cloudshell.v1.json index 93e9d580b90..e1fc6e1dcf7 100644 --- a/googleapiclient/discovery_cache/documents/cloudshell.v1.json +++ b/googleapiclient/discovery_cache/documents/cloudshell.v1.json @@ -374,7 +374,7 @@ } } }, - "revision": "20231023", + "revision": "20231027", "rootUrl": "https://cloudshell.googleapis.com/", "schemas": { "AddPublicKeyMetadata": { diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json index 1c63e8b7449..158d0385495 100644 --- a/googleapiclient/discovery_cache/documents/cloudsupport.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2.json @@ -552,7 +552,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://cloudsupport.googleapis.com/", "schemas": { "Actor": { diff --git a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json index e060dfbe2d8..e9c6663e927 100644 --- a/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json +++ b/googleapiclient/discovery_cache/documents/cloudsupport.v2beta.json @@ -548,7 +548,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://cloudsupport.googleapis.com/", "schemas": { "Actor": { diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2.json index cfe75138189..1dfcd503cce 100644 --- a/googleapiclient/discovery_cache/documents/cloudtasks.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2.json @@ -267,7 +267,7 @@ ] }, "delete": { - "description": "Deletes a queue. This command will delete the queue even if it has tasks in it. 
Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.", + "description": "Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}", "httpMethod": "DELETE", "id": "cloudtasks.projects.locations.queues.delete", @@ -744,7 +744,7 @@ } } }, - "revision": "20230929", + "revision": "20231018", "rootUrl": "https://cloudtasks.googleapis.com/", "schemas": { "AppEngineHttpRequest": { @@ -870,7 +870,7 @@ "type": "object" }, "CmekConfig": { - "description": "CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer\u2019s hands.", + "description": "Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.", "id": "CmekConfig", "properties": { "kmsKey": { diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json index 4780f717411..b83b74b749c 100644 --- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json +++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta2.json @@ -636,7 +636,7 @@ ] }, "buffer": { - "description": "Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).", + "description": "Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. 
To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.", "flatPath": "v2beta2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{taskId}:buffer", "httpMethod": "POST", "id": "cloudtasks.projects.locations.queues.tasks.buffer", @@ -935,7 +935,7 @@ } } }, - "revision": "20230929", + "revision": "20231018", "rootUrl": "https://cloudtasks.googleapis.com/", "schemas": { "AcknowledgeTaskRequest": { @@ -1132,7 +1132,7 @@ "type": "object" }, "CmekConfig": { - "description": "CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer\u2019s hands.", + "description": "Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.", "id": "CmekConfig", "properties": { "kmsKey": { diff --git a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json index 48613474c8c..9c8436a2465 100644 --- a/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json +++ b/googleapiclient/discovery_cache/documents/cloudtasks.v2beta3.json @@ -267,7 +267,7 @@ ] }, "delete": { - "description": "Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.", + "description": "Deletes a queue. This command will delete the queue even if it has tasks in it. Note : If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.", "flatPath": "v2beta3/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}", "httpMethod": "DELETE", "id": "cloudtasks.projects.locations.queues.delete", @@ -576,7 +576,7 @@ "tasks": { "methods": { "buffer": { - "description": "Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. 
To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).", + "description": "Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.", "flatPath": "v2beta3/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{taskId}:buffer", "httpMethod": "POST", "id": "cloudtasks.projects.locations.queues.tasks.buffer", @@ -791,7 +791,7 @@ } } }, - "revision": "20230929", + "revision": "20231018", "rootUrl": "https://cloudtasks.googleapis.com/", "schemas": { "AppEngineHttpQueue": { @@ -950,7 +950,7 @@ "type": "object" }, "CmekConfig": { - "description": "CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer\u2019s hands.", + "description": "Describes the customer-managed encryption key (CMEK) configuration associated with a project and location.", "id": "CmekConfig", "properties": { "kmsKey": { diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json index 428a55324df..6a7fd6edd14 100644 --- a/googleapiclient/discovery_cache/documents/compute.alpha.json +++ b/googleapiclient/discovery_cache/documents/compute.alpha.json @@ -11418,6 +11418,59 @@ "https://www.googleapis.com/auth/compute" ] }, + "deleteNetworkInterface": { + "description": "Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/deleteNetworkInterface", + "httpMethod": "POST", + "id": "compute.instances.deleteNetworkInterface", + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterfaceName" + ], + "parameters": { + "instance": { + "description": "The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer.", + "location": "path", + "required": true, + "type": "string" + }, + "networkInterfaceName": { + "description": "The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported.", + "location": "query", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/deleteNetworkInterface", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "detachDisk": { "description": "Detaches a disk from an instance.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/detachDisk", @@ -43489,7 +43542,7 @@ } } }, - "revision": "20231011", + "revision": "20231017", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -46786,7 +46839,7 @@ "type": "string" }, "ipAddressSelectionPolicy": { - "description": "Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. 
Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", "enum": [ "IPV4_ONLY", "IPV6_ONLY", @@ -48694,6 +48747,13 @@ "description": "[Output Only] Commitment end time in RFC3339 text format.", "type": "string" }, + "existingReservations": { + "description": "Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation ", + "items": { + "type": "string" + }, + "type": "array" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -68023,10 +68083,6 @@ "description": "Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used.", "format": "int32", "type": "integer" - }, - "zone": { - "description": "The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group.", - "type": "string" } }, "type": "object" @@ -74156,7 +74212,7 @@ "description": "[Output Only] The Cloud Armor Managed Protection (CAMP) tier for this project. It can be one of the following values: CA_STANDARD, CAMP_PLUS_MONTHLY. If this field is not specified, it is assumed to be CA_STANDARD.", "enum": [ "CAMP_PLUS_ANNUAL", - "CAMP_PLUS_MONTHLY", + "CAMP_PLUS_PAYGO", "CA_STANDARD" ], "enumDescriptions": [ @@ -74313,7 +74369,7 @@ "description": "Managed protection tier to be set.", "enum": [ "CAMP_PLUS_ANNUAL", - "CAMP_PLUS_MONTHLY", + "CAMP_PLUS_PAYGO", "CA_STANDARD" ], "enumDescriptions": [ @@ -79603,6 +79659,10 @@ "description": "Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls.", "id": "ResourceStatus", "properties": { + "lastInstanceTerminationDetails": { + "$ref": "ResourceStatusLastInstanceTerminationDetails", + "description": "[Output Only] Contains last termination details why the instance was terminated." 
+ }, "physicalHost": { "description": "[Output Only] An opaque ID of the host on which the VM is running.", "type": "string" @@ -79620,6 +79680,48 @@ }, "type": "object" }, + "ResourceStatusLastInstanceTerminationDetails": { + "id": "ResourceStatusLastInstanceTerminationDetails", + "properties": { + "terminationReason": { + "description": "Reason for termination", + "enum": [ + "BAD_BILLING_ACCOUNT", + "CLOUD_ABUSE_DETECTED", + "DISK_ERROR", + "FREE_TRIAL_EXPIRED", + "INSTANCE_UPDATE_REQUIRED_RESTART", + "INTERNAL_ERROR", + "KMS_REJECTION", + "MANAGED_INSTANCE_GROUP", + "OS_TERMINATED", + "PREEMPTED", + "SCHEDULED_STOP", + "SHUTDOWN_DUE_TO_MAINTENANCE", + "UNSPECIFIED_TERMINATION_REASON", + "USER_TERMINATED" + ], + "enumDescriptions": [ + "Terminated due to bad billing", + "Terminated by Cloud Abuse team", + "Terminated due to disk errors", + "Terminated due to free trial expired", + "Instance.update initiated which required RESTART", + "Terminated due to internal error", + "Terminated due to Key Management Service (KMS) key failure.", + "Terminated by managed instance group", + "Terminated from the OS level", + "Terminated due to preemption", + "Terminated due to scheduled stop", + "Terminated due to maintenance", + "The termination reason is not specified", + "Terminated by user" + ], + "type": "string" + } + }, + "type": "object" + }, "ResourceStatusScheduling": { "id": "ResourceStatusScheduling", "properties": { @@ -83415,7 +83517,7 @@ "description": "[Output Only] The minimum managed protection tier required for this rule. [Deprecated] Use requiredManagedProtectionTiers instead.", "enum": [ "CAMP_PLUS_ANNUAL", - "CAMP_PLUS_MONTHLY", + "CAMP_PLUS_PAYGO", "CA_STANDARD" ], "enumDescriptions": [ diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index 61664f1f295..67280f84748 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -40215,7 +40215,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -53800,11 +53800,6 @@ "type": "array" }, "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", "pattern": "[a-z][-a-z0-9]{0,57}", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/compute.v1.json b/googleapiclient/discovery_cache/documents/compute.v1.json index 42e1a81811e..8377d3edeb1 100644 --- a/googleapiclient/discovery_cache/documents/compute.v1.json +++ b/googleapiclient/discovery_cache/documents/compute.v1.json @@ -35267,7 +35267,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -47650,11 +47650,6 @@ "type": "array" }, "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. 
The base instance name must comply with RFC1035.", "pattern": "[a-z][-a-z0-9]{0,57}", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/connectors.v1.json b/googleapiclient/discovery_cache/documents/connectors.v1.json index 0de7446561a..4cc54ce292a 100644 --- a/googleapiclient/discovery_cache/documents/connectors.v1.json +++ b/googleapiclient/discovery_cache/documents/connectors.v1.json @@ -1832,7 +1832,7 @@ } } }, - "revision": "20231015", + "revision": "20231024", "rootUrl": "https://connectors.googleapis.com/", "schemas": { "AuditConfig": { @@ -2371,12 +2371,22 @@ "enum": [ "STATE_UNSPECIFIED", "REFRESHING", - "UPDATED" + "UPDATED", + "REFRESHING_SCHEMA_METADATA", + "UPDATED_SCHEMA_METADATA", + "REFRESH_SCHEMA_METADATA_FAILED", + "REFRESHING_FULL_SCHEMA", + "UPDATED_FULL_SCHEMA" ], "enumDescriptions": [ "Default state.", "Schema refresh is in progress.", - "Schema has been updated." + "Schema has been updated.", + "Schema refresh for metadata is in progress.", + "Schema metadata has been updated.", + "Failed to refresh schema metadata", + "Triggered full schema refresh", + "Updated full schema" ], "readOnly": true, "type": "string" @@ -2978,6 +2988,10 @@ "description": "Optional. Event type id of the event of current EventSubscription.", "type": "string" }, + "jms": { + "$ref": "JMS", + "description": "Optional. JMS is the source for the event listener." + }, "name": { "description": "Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription}", "type": "string" @@ -3184,6 +3198,20 @@ "description": "Enrichment Supported.", "type": "boolean" }, + "eventListenerType": { + "description": "The type of the event listener for a specific connector.", + "enum": [ + "EVENT_LISTENER_TYPE_UNSPECIFIED", + "WEBHOOK_LISTENER", + "JMS_LISTENER" + ], + "enumDescriptions": [ + "Default value.", + "Webhook listener. e.g. Jira, Zendesk, Servicenow etc.,", + "JMS Listener. e.g. IBM MQ, Rabbit MQ etc.," + ], + "type": "string" + }, "isEventingSupported": { "description": "Is Eventing Supported.", "type": "boolean" @@ -3789,6 +3817,31 @@ }, "type": "object" }, + "JMS": { + "description": "JMS message denotes the source of the event", + "id": "JMS", + "properties": { + "name": { + "description": "Optional. Name of the JMS source. i.e. queueName or topicName", + "type": "string" + }, + "type": { + "description": "Optional. Type of the JMS Source. i.e. Queue or Topic", + "enum": [ + "TYPE_UNSPECIFIED", + "QUEUE", + "TOPIC" + ], + "enumDescriptions": [ + "Default state.", + "JMS Queue.", + "JMS Topic." 
+ ], + "type": "string" + } + }, + "type": "object" + }, "JsonSchema": { "description": "JsonSchema representation of schema metadata", "id": "JsonSchema", diff --git a/googleapiclient/discovery_cache/documents/connectors.v2.json b/googleapiclient/discovery_cache/documents/connectors.v2.json index 17b3e27c5ad..1cf0ec6c5a6 100644 --- a/googleapiclient/discovery_cache/documents/connectors.v2.json +++ b/googleapiclient/discovery_cache/documents/connectors.v2.json @@ -558,7 +558,7 @@ } } }, - "revision": "20231015", + "revision": "20231024", "rootUrl": "https://connectors.googleapis.com/", "schemas": { "Action": { diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1.json index 93ebcdec6f0..fccd16806c7 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1.json @@ -755,7 +755,7 @@ } } }, - "revision": "20231013", + "revision": "20231023", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json index bcfeea081e9..f6fda7b78d0 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json @@ -1233,7 +1233,7 @@ } } }, - "revision": "20231013", + "revision": "20231023", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AnalysisCompleted": { diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json index 55a6cbcf2f6..1ad1602389d 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json @@ -815,7 +815,7 @@ } } }, - "revision": "20231013", + "revision": "20231023", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/content.v2.1.json b/googleapiclient/discovery_cache/documents/content.v2.1.json index fd610ec131f..728140dc7ef 100644 --- a/googleapiclient/discovery_cache/documents/content.v2.1.json +++ b/googleapiclient/discovery_cache/documents/content.v2.1.json @@ -6488,7 +6488,7 @@ } } }, - "revision": "20231020", + "revision": "20231030", "rootUrl": "https://shoppingcontent.googleapis.com/", "schemas": { "Account": { @@ -14061,7 +14061,7 @@ "type": "number" }, "predictedGrossProfitChangeFraction": { - "description": "The predicted change in gross profit as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in gross profit.", + "description": "*Deprecated*: This field is no longer supported and will start returning 0. The predicted change in gross profit as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in gross profit.", "format": "double", "type": "number" }, @@ -14071,11 +14071,11 @@ "type": "number" }, "predictedMonthlyGrossProfitChangeCurrencyCode": { - "description": "The predicted monthly gross profit change currency (ISO 4217 code).", + "description": "*Deprecated*: This field is no longer supported and will start returning USD for all requests. 
The predicted monthly gross profit change currency (ISO 4217 code).", "type": "string" }, "predictedMonthlyGrossProfitChangeMicros": { - "description": "The predicted change in gross profit in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) after introducing the suggested price for a month compared to current active price.", + "description": "*Deprecated*: This field is no longer supported and will start returning 0. The predicted change in gross profit in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) after introducing the suggested price for a month compared to current active price.", "format": "int64", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json b/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json index 433c36bddb0..2c259249e73 100644 --- a/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json +++ b/googleapiclient/discovery_cache/documents/contentwarehouse.v1.json @@ -1156,7 +1156,7 @@ } } }, - "revision": "20231011", + "revision": "20231020", "rootUrl": "https://contentwarehouse.googleapis.com/", "schemas": { "AbuseiamAbuseType": { @@ -3733,7 +3733,8 @@ "NOTEBOOKLM_AFFINITY", "PLAYSPACE_LABS_AFFINITY", "ZOMBIE_CLOUD_AFFINITY", - "RELATIONSHIPS_AFFINITY" + "RELATIONSHIPS_AFFINITY", + "APPS_WORKFLOW_AFFINITY" ], "enumDeprecated": [ false, @@ -4007,6 +4008,7 @@ false, false, false, + false, false ], "enumDescriptions": [ @@ -4281,6 +4283,7 @@ "", "", "", + "", "" ], "type": "string" @@ -11388,8 +11391,7 @@ "type": "object" }, "AssistantApiCoreTypesCalendarEvent": { - "deprecated": true, - "description": "This proto contains the information of a calendar event, including title, start time, end time, etc. IMPORTANT: The definition of CalendarEvent proto is being moved to //assistant/api/core_types/governed/calendar_event_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. LINT.IfChange(CalendarEvent) NEXT_ID: 26", + "description": "This proto contains the information of a calendar event, including title, start time, end time, etc. LINT.IfChange(CalendarEvent) NEXT_ID: 26", "id": "AssistantApiCoreTypesCalendarEvent", "properties": { "attendees": { @@ -11723,8 +11725,7 @@ "type": "object" }, "AssistantApiCoreTypesCalendarEventWrapper": { - "deprecated": true, - "description": "This empty type allows us to publish sensitive calendar events to go/attentional-entities, while maintaining BUILD visibility protection for their contents. The BUILD-visibility-protected extension to this message is defined at http://google3/assistant/verticals/calendar/proto/multi_account_calendar_event.proto IMPORTANT: The definition of CalendarEventWrapper proto is being moved to //assistant/api/core_types/governed/calendar_event_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "This empty type allows us to publish sensitive calendar events to go/attentional-entities, while maintaining BUILD visibility protection for their contents. 
The BUILD-visibility-protected extension to this message is defined at http://google3/assistant/verticals/calendar/proto/multi_account_calendar_event.proto", "id": "AssistantApiCoreTypesCalendarEventWrapper", "properties": {}, "type": "object" @@ -11826,7 +11827,6 @@ "type": "object" }, "AssistantApiCoreTypesDeviceConfig": { - "deprecated": true, "description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. Next Id: 5", "id": "AssistantApiCoreTypesDeviceConfig", "properties": { @@ -11842,7 +11842,6 @@ "type": "object" }, "AssistantApiCoreTypesDeviceId": { - "deprecated": true, "description": "LINT.IfChange(DeviceId) Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. * C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. Next ID: 14", "id": "AssistantApiCoreTypesDeviceId", "properties": { @@ -11903,8 +11902,7 @@ "type": "object" }, "AssistantApiCoreTypesDeviceUserIdentity": { - "deprecated": true, - "description": "IMPORTANT: The definition of DeviceUserIdentity is being moved to //assistant/api/core_types/governed/device_user_identity.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new DeviceUserIdentity instead of this one. // LINT.IfChange", + "description": "LINT.IfChange", "id": "AssistantApiCoreTypesDeviceUserIdentity", "properties": { "deviceId": { @@ -11920,7 +11918,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedColor": { - "description": "Represents a color in the RGBA color space. This message mirrors google.type.Color.", + "deprecated": true, + "description": "LINT.IfChange Represents a color in the RGBA color space. This message mirrors google.type.Color. IMPORTANT: The definition of Color proto is being moved to //assistant/api/core_types/color_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", "id": "AssistantApiCoreTypesGovernedColor", "properties": { "alpha": { @@ -11947,7 +11946,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedDeviceConfig": { - "description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. Next Id: 5", + "deprecated": true, + "description": "The identification information for third party devices that integrates with the assistant. All of these fields will be populated by the third party when the query is sent from the third party device. IMPORTANT: The definition of DeviceConfig proto is being moved to //assistant/api/core_types/device_type.proto. 
All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. Next Id: 5", "id": "AssistantApiCoreTypesGovernedDeviceConfig", "properties": { "agentId": { @@ -11962,7 +11962,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedDeviceId": { - "description": "LINT.IfChange Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. * C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. Next ID: 14", + "deprecated": true, + "description": "LINT.IfChange Specifies identifier of a device AKA surface. Note there may be multiple device ids for the same physical device E.g. Allo app and Assistant app on Nexus. Note: DeviceId usage is complicated. Please do not depend on it for surface specific logic. Please use google3/assistant/api/capabilities.proto instead. IMPORTANT: When checking for equality between two `DeviceId`s, you should always use an `isSameDevice{As}` function to check for equality, as deep equality between `DeviceId`'s is not guaranteed. * C++: http://google3/assistant/assistant_server/util/device_id_util.cc;l=23;rcl=421295740 * Dart: http://google3/assistant/context/util/lib/device_id.dart;l=26;rcl=442126145 * Java: http://google3/java/com/google/assistant/assistantserver/utils/DeviceIdHelper.java;l=9;rcl=390378522 See http://go/deviceid-equality for more details. IMPORTANT: The definition of DeviceId proto is being moved to //assistant/api/core_types/device_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. Next ID: 14", "id": "AssistantApiCoreTypesGovernedDeviceId", "properties": { "agsaClientInstanceId": { @@ -12022,7 +12023,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedRingtoneTaskMetadata": { - "description": "Task metadata information describing the ringtone. Next id: 11", + "deprecated": true, + "description": "LINT.IfChange Task metadata information describing the ringtone. IMPORTANT: The definition of RingtoneTaskMetadata proto is being moved to //assistant/api/core_types/ringtone_task_metadata.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. Next id: 11", "id": "AssistantApiCoreTypesGovernedRingtoneTaskMetadata", "properties": { "category": { @@ -12340,7 +12342,8 @@ "type": "object" }, "AssistantApiCoreTypesGovernedSurfaceIdentity": { - "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. 
NEXT ID: 6 LINT.IfChange", + "deprecated": true, + "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. IMPORTANT: The partial migration to the SurfaceIdentity and SurfaceVersion protos defined here is being rolled back (b/303012824). All existing references will be updated to point back to //assistant/api/core_types/surface_identity.proto. If you are adding a reference, use the SurfaceIdentity and SurfaceVersion protos defined there. NEXT ID: 6 LINT.IfChange", "id": "AssistantApiCoreTypesGovernedSurfaceIdentity", "properties": { "deviceId": { @@ -12667,6 +12670,7 @@ "type": "object" }, "AssistantApiCoreTypesGovernedSurfaceVersion": { + "deprecated": true, "description": "The version of the surface/client. New surfaces are encouraged to only use the \u201cmajor\u201d field to keep track of version number. The \u201cminor\u201d field may be used for surfaces that rely on both the \u201cmajor\u201d and \u201cminor\u201d fields to define their version.", "id": "AssistantApiCoreTypesGovernedSurfaceVersion", "properties": { @@ -12697,8 +12701,7 @@ "type": "object" }, "AssistantApiCoreTypesImage": { - "deprecated": true, - "description": "An image represents the data about an image or a photo. IMPORTANT: The definition of the Image message is being moved to //assistant/api/core_types/governed/image_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new Image message instead of this one. LINT.IfChange NextId: 13", + "description": "An image represents the data about an image or a photo. LINT.IfChange NextId: 13", "id": "AssistantApiCoreTypesImage", "properties": { "accessibilityText": { @@ -13124,8 +13127,7 @@ "type": "object" }, "AssistantApiCoreTypesSurfaceIdentity": { - "deprecated": true, - "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. NEXT ID: 6 IMPORTANT: The definitions of the SurfaceIdentity and SurfaceVersion protos are being moved to //assistant/api/core_types/governed/surface_identity.proto All existing references will be updated to point to the new location. If you are adding a reference, use the new SurfaceIdentity and SurfaceVersion protos instead of the protos defined here. LINT.IfChange", + "description": "The set of information that helps the server identify the surface. This replaces the User-Agent string within the Assistant Server. Note: The SurfaceIdentity proto should only be used to derive the capabilities of a surface. It should not be accessed outside of the CapabilityBuilder or CapabilityChecker. NEXT ID: 6 LINT.IfChange", "id": "AssistantApiCoreTypesSurfaceIdentity", "properties": { "deviceId": { @@ -13446,7 +13448,7 @@ }, "surfaceTypeString": { "deprecated": true, - "description": "DEPRECATED. assistant.api.core_types.governed.SurfaceIdentity.surface_type field should be used instead. The device's surface type. This is the string version of the assistant.api.core_types.SurfaceType enum. 
The server should not use this field, rather it should use the SurfaceType value derived from this string.", + "description": "DEPRECATED. The legacy device's surface type string. NOTE: Prefer using the ontological `surface_type` field. The device's surface type. This is the string version of the assistant.api.core_types.SurfaceType enum. The server should not use this field, rather it should use the SurfaceType value derived from this string.", "type": "string" }, "surfaceVersion": { @@ -13457,7 +13459,6 @@ "type": "object" }, "AssistantApiCoreTypesSurfaceType": { - "deprecated": true, "description": "Specifies the types of device surfaces. LINT.IfChange When adding new surface types make sure that My Activity (https://myactivity.google.com/product/assistant) will correctly render by adding your enum to http://cs/symbol:GetAssistSurfaceName%20f:%5C.cc$ If your type doesn't fit in to any of the existing surfaces messages, add a new message in http://google3/personalization/footprints/boq/uservisible/events/intl/smh_frontend_messages.h.", "id": "AssistantApiCoreTypesSurfaceType", "properties": { @@ -13651,7 +13652,6 @@ "type": "object" }, "AssistantApiCoreTypesSurfaceVersion": { - "deprecated": true, "description": "The version of the surface/client. New surfaces are encouraged to only use the \u201cmajor\u201d field to keep track of version number. The \u201cminor\u201d field may be used for surfaces that rely on both the \u201cmajor\u201d and \u201cminor\u201d fields to define their version.", "id": "AssistantApiCoreTypesSurfaceVersion", "properties": { @@ -13723,8 +13723,7 @@ "type": "object" }, "AssistantApiDate": { - "deprecated": true, - "description": "A Gregorian calendar date. IMPORTANT: The definition of Date proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A Gregorian calendar date.", "id": "AssistantApiDate", "properties": { "day": { @@ -13746,8 +13745,7 @@ "type": "object" }, "AssistantApiDateTime": { - "deprecated": true, - "description": "A date-time specification, combining a date and civil time (relative to a given timezone). IMPORTANT: The definition of DateTime proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A date-time specification, combining a date and civil time (relative to a given timezone).", "id": "AssistantApiDateTime", "properties": { "date": { @@ -13765,6 +13763,21 @@ }, "type": "object" }, + "AssistantApiDateTimeRange": { + "description": "A representation of a range of time with start and end datetime specified.", + "id": "AssistantApiDateTimeRange", + "properties": { + "endDate": { + "$ref": "AssistantApiDateTime", + "description": "End date of the range." + }, + "startDate": { + "$ref": "AssistantApiDateTime", + "description": "Start date of the range." + } + }, + "type": "object" + }, "AssistantApiDeviceCapabilities": { "description": "This message describes roughly what a surface is capable of doing and metadata around those capabilities. These capabilities are determined based on: - device hardware - software - status (e.g. volume level, battery percentage) These capabilities refer to the surface and not the physical device. 
The list of supported surfaces can be found in the assistant.api.core_types.SurfaceType enum. A surface's capabilities can differ from the device's. An example would be ANDROID_ALLO running on Pixel. Allo does not support AudioInput while the Pixel does. In this case, audio_input will be set to false for Assistant Allo requests while it might be set to true for OPA_NEXUS requests. Next ID: 36", "id": "AssistantApiDeviceCapabilities", @@ -15137,8 +15150,7 @@ "type": "object" }, "AssistantApiRecurrence": { - "deprecated": true, - "description": "Date-based recurrences specify repeating events. Conceptually, a recurrence is a (possibly unbounded) sequence of dates on which an event falls, described by a list of constraints. A date is in a recurrence if and only if it satisfies all of the constraints. Note that devices may support some constraints, but not all. IMPORTANT: The definition of Recurrence proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "Date-based recurrences specify repeating events. Conceptually, a recurrence is a (possibly unbounded) sequence of dates on which an event falls, described by a list of constraints. A date is in a recurrence if and only if it satisfies all of the constraints. Note that devices may support some constraints, but not all.", "id": "AssistantApiRecurrence", "properties": { "begin": { @@ -15148,7 +15160,7 @@ "blacklistedRanges": { "description": "A list of blacklisted dates to skip the alarm on.", "items": { - "$ref": "AssistantApiRecurrenceDatetimeRange" + "$ref": "AssistantApiDateTimeRange" }, "type": "array" }, @@ -15201,21 +15213,6 @@ }, "type": "object" }, - "AssistantApiRecurrenceDatetimeRange": { - "description": "A representation of a range of time with start and end datetime specified.", - "id": "AssistantApiRecurrenceDatetimeRange", - "properties": { - "endDate": { - "$ref": "AssistantApiDateTime", - "description": "End date of the range." - }, - "startDate": { - "$ref": "AssistantApiDateTime", - "description": "Start date of the range." - } - }, - "type": "object" - }, "AssistantApiScreenCapabilities": { "description": "These capabilities represent the tactile features associated with the device. This includes, for example, whether the device has a screen, how big the screen is, and privacy of the screen. Next ID: 11", "id": "AssistantApiScreenCapabilities", @@ -18130,8 +18127,7 @@ "type": "object" }, "AssistantApiTimeOfDay": { - "deprecated": true, - "description": "A civil time relative to a timezone. IMPORTANT: The definition of TimeOfDay proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead.", + "description": "A civil time relative to a timezone.", "id": "AssistantApiTimeOfDay", "properties": { "hour": { @@ -18158,8 +18154,7 @@ "type": "object" }, "AssistantApiTimeZone": { - "deprecated": true, - "description": "A time zone. Conceptually, a time zone is a set of rules associated with a location that describes a UTC offset and how it changes over time (e.g. Daylight Saving Time). The offset is used to compute the local date and time. IMPORTANT: The definition of TimeZone enum is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. 
If you are adding a reference, use the new one instead.", + "description": "A time zone. Conceptually, a time zone is a set of rules associated with a location that describes a UTC offset and how it changes over time (e.g. Daylight Saving Time). The offset is used to compute the local date and time.", "id": "AssistantApiTimeZone", "properties": { "ianaId": { @@ -18170,8 +18165,7 @@ "type": "object" }, "AssistantApiTimestamp": { - "deprecated": true, - "description": "An absolute point in time independent of timezone or calendar, based on the proto3 Timestamp (//google/protobuf/timestamp.proto). IMPORTANT: The definition of Timestamp proto is being moved to //assistant/api/core_types/governed/datetime_type.proto. All existing references will be updated to point to the new location. If you are adding a reference, use the new one instead. NOTE: THIS IS NO LONGER RECOMMENDED TO BE USED. It was originally defined separately from google.protobuf.Timestamp due to incompatibility with proto2 syntax. The incompatibility issues have since been resolved and so the Google-wide standard representation of google.protobuf.Timestamp should be preferred. In fact, google.protobuf.* protos in general are now recommended to be used in new APIs.", + "description": "An absolute point in time independent of timezone or calendar, based on the proto3 Timestamp (//google/protobuf/timestamp.proto). NOTE: THIS IS NO LONGER RECOMMENDED TO BE USED. It was originally defined separately from google.protobuf.Timestamp due to incompatibility with proto2 syntax. The incompatibility issues have since been resolved and so the Google-wide standard representation of google.protobuf.Timestamp should be preferred. In fact, google.protobuf.* protos in general are now recommended to be used in new APIs.", "id": "AssistantApiTimestamp", "properties": { "nanos": { @@ -18239,7 +18233,7 @@ "type": "object" }, "AssistantContextAppProviderId": { - "description": "LINT.IfChanged Identifier for an application provider. NOTE: AppProviderId contains surface-specific info, such as the Android package name of the application. This was necessary for supporting current use cases that rely on surface-specific info in feature code. Eventually we want to deprecate AppProviderId and fetch surface-specific info in some other way (e.g. in a surface-translation layer). But until then, we may continue extending AppProviderId with other surface-specific info.", + "description": "LINT.IfChange Identifier for an application provider. NOTE: AppProviderId contains surface-specific info, such as the Android package name of the application. This was necessary for supporting current use cases that rely on surface-specific info in feature code. Eventually we want to deprecate AppProviderId and fetch surface-specific info in some other way (e.g. in a surface-translation layer). But until then, we may continue extending AppProviderId with other surface-specific info.", "id": "AssistantContextAppProviderId", "properties": { "activityClassName": { @@ -19485,7 +19479,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -19494,7 +19489,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." 
+ "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" } @@ -19926,7 +19922,7 @@ "type": "object" }, "AssistantGroundingRankerMediaGroundingProviderFeatures": { - "description": "Features to be passed from Media GP to HGR. Next ID: 20", + "description": "Features to be passed from Media GP to HGR. Next ID: 21", "id": "AssistantGroundingRankerMediaGroundingProviderFeatures", "properties": { "albumReleaseType": { @@ -19996,6 +19992,10 @@ "description": "True if the user requests seed radio.", "type": "boolean" }, + "isSelfReportedSvodProvider": { + "description": "Provider is a self(user) reported subscripted provider https://g3doc.corp.google.com/knowledge/g3doc/ump/development/GetProviderAffinity.md?cl=head", + "type": "boolean" + }, "isYoutubeMusicSeeking": { "description": "Indicates whether this is youtube content seeking music.", "type": "boolean" @@ -20632,7 +20632,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -20641,7 +20642,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" }, @@ -32762,7 +32764,7 @@ "type": "object" }, "GeostoreCityJsonProto": { - "description": "CityJsonProto is a custom proto representation of the portion of the CityJSON spec (https://www.cityjson.org/) relevant to internal projects. See go/cityjsonproto-design for more information about the modeling and design decisions implemented here.", + "description": "CityJsonProto is a custom proto representation of the portion of the CityJSON spec (https://www.cityjson.org/) relevant to internal projects. See go/cityjsonproto-design for more information about the modeling and design decisions implemented here. LINT.IfChange", "id": "GeostoreCityJsonProto", "properties": { "cityObjects": { @@ -32820,7 +32822,7 @@ "type": "object" }, "GeostoreCityJsonProtoCityObjectGeometry": { - "description": "Representation of geometry. Geometries vary both in type and in level-of-detail, enabling representation of any shape at any level of granularity.", + "description": "Representation of geometry including geometric primitives which are used as building blocks to construct geometries of varying complexity. Geometries vary both in type and in level-of-detail, enabling representation of any shape at any level of granularity. All geometries are ultimately composed of `MultiPoint`s, which reference the actual vertices. 
Only linear and planar shapes are allowed, no curves or parametric surfaces.", "id": "GeostoreCityJsonProtoCityObjectGeometry", "properties": { "lod": { @@ -33408,6 +33410,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -34131,6 +34134,7 @@ false, false, false, + false, true, false, false, @@ -34672,7 +34676,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -34850,6 +34854,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -39717,6 +39722,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -40440,6 +40446,7 @@ false, false, false, + false, true, false, false, @@ -40981,7 +40988,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -41159,6 +41166,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -42653,6 +42661,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -43376,6 +43385,7 @@ false, false, false, + false, true, false, false, @@ -43917,7 +43927,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -44095,6 +44105,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -45892,6 +45903,7 @@ "PROVIDER_GOOGLE_GEO_NG_LOCAL", "PROVIDER_GOOGLE_MAPFACTS_CLEANUP", "PROVIDER_GOOGLE_THIRD_PARTY_UGC", + "PROVIDER_GOOGLE_GEO_ISSUE_ADMIN", "PROVIDER_GOOGLE_LOCALSEARCH", "PROVIDER_GOOGLE_TRANSIT", "PROVIDER_GOOGLE_GEOWIKI", @@ -46615,6 +46627,7 @@ false, false, false, + false, true, false, false, @@ -47156,7 +47169,7 @@ "", "ABSTRACT", "", - "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B7", + "All new \"Google\" provider entries must be full ints. The next available ID is: 0x111730B8", "ABSTRACT", "", "", @@ -47334,6 +47347,7 @@ "", "", "UMBRELLA", + "", "The next new \"Google\" provider entries should be placed above.", "UMBRELLA", "", @@ -60877,168 +60891,6 @@ }, "type": "object" }, - "HumanSensingFaceAttribute": { - "description": "Defines a generic attribute. The name field is the name of the attribute (for example beard, glasses, joy). The confidence defines how reliable the given annotation is. For binary attributes it is bounded between 0 and 1 and can be interpreted as the posterior probability. The value field can be used for continuous attributes like age. 
Information returned or stored in this message may be sensitive from a privacy, policy, or legal point of view. Clients should consult with their p-counsels and the privacy working group (go/pwg) to make sure their use respects Google policies.", - "id": "HumanSensingFaceAttribute", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "name": { - "type": "string" - }, - "type": { - "enum": [ - "TYPE_UNKNOWN", - "FREE_FORM", - "FEMALE", - "MALE", - "AGE", - "NON_HUMAN", - "GLASSES", - "DARK_GLASSES", - "HEADWEAR", - "EYES_VISIBLE", - "LEFT_EYELID_CLOSED", - "RIGHT_EYELID_CLOSED", - "MOUTH_OPEN", - "FACIAL_HAIR", - "LONG_HAIR", - "FRONTAL_GAZE", - "SMILING", - "UNDER_EXPOSED", - "BLURRED", - "LEFT_EYE_VISIBLE", - "RIGHT_EYE_VISIBLE", - "LEFT_EAR_VISIBLE", - "RIGHT_EAR_VISIBLE", - "NOSE_TIP_VISIBLE", - "MOUTH_CENTER_VISIBLE", - "LOWER_FACE_COVERED", - "AMUSEMENT", - "ANGER", - "CONCENTRATION", - "CONFUSION", - "CONTENTMENT", - "DESIRE", - "DISAPPOINTMENT", - "DISGUST", - "ELATION", - "EMBARRASSMENT", - "INTEREST", - "LOVE", - "PAIN", - "PRIDE", - "RELIEF", - "SADNESS", - "SURPRISE", - "CANDID", - "POSED" - ], - "enumDeprecated": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false - ], - "enumDescriptions": [ - "", - "", - "Attribute types that describe the gender of a face. For an attribute if type FEMALE the confidence represent the probability of a face to be from a female person. Similarly, for an attribute of type MALE the confidence is the probability of a face to be from a male person. 4 is reserved for OTHER_GENDER.", - "", - "Attribute type that represent the age of the face. For an attribute of this type the field value represent the age. Values are assumed to be in the range [0, 95].", - "This attributes is used to distinguish actual human faces from other possible face detections like face of sculptures, cartoons faces, and some false detections.", - "Attributes types that describes face appearances/configurations (mouth open, eyes visibles and looking into the camera, smiling) and props (glasses, dark glasses, and headwear).", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Attributes for the visibility of face landmarks. The landmarks refers to a single point in the image, so the eyes are visible if their center is visible, the ears are visible if the ear tragion is visible.", - "", - "", - "", - "", - "", - "An attribute describing if the lower part of a face is covered by something like a face mask, a scarf or any other type of covering. The expectation is for both the mouth and the nose tip to be covered. 
This is useful for labeling faces in images captured during the Covid pandemic.", - "FeelNet expressions.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "value": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, "I18nPhonenumbersPhoneNumber": { "description": "The PhoneNumber object that is used by all LibPhoneNumber API's to fully represent a phone number.", "id": "I18nPhonenumbersPhoneNumber", @@ -61542,7 +61394,7 @@ "type": "object" }, "ImageData": { - "description": "This defines the per-doc data which is extracted from thumbnails and propagated over to indexing. It contains all information that can be used for restricts. Next tag id: 132", + "description": "This defines the per-doc data which is extracted from thumbnails and propagated over to indexing. It contains all information that can be used for restricts. Next tag id: 131", "id": "ImageData", "properties": { "adaboostImageFeaturePorn": { @@ -61666,10 +61518,6 @@ "$ref": "PhotosImageMetadata", "description": "The EXIF generated by photos backend team's (more specifically FIFE's) thumbnailer library. This exif model is more comprehensive since a dedicated team is constantly improving it and adding new fields over time. This is currently populated by moonshine for selected corpora." }, - "faceDetection": { - "$ref": "ReneFaceResponse", - "description": "Face Detection." - }, "featuredImageProp": { "$ref": "ImageMonetizationFeaturedImageProperties", "description": "Properties used in featured imagesearch project. inspiration_score indicates how well an image is related to products, or how inspirational it is." @@ -63385,7 +63233,8 @@ "GENUS_SEARCH_SPORTS", "GENUS_BUSINESSMESSAGING", "GENUS_AERIAL_VIEW", - "GENUS_DOCS_FLIX_RENDER" + "GENUS_DOCS_FLIX_RENDER", + "GENUS_SHOPPING" ], "enumDescriptions": [ "", @@ -63436,7 +63285,8 @@ "Genus for Search Sports vertical videos", "Genus for Business Messaging videos", "Genus for Geo Aerial View", - "Genus for Flix Render (Docs)" + "Genus for Flix Render (Docs)", + "Genus for CDS videos processed through Amarna." ], "type": "string" }, @@ -64336,7 +64186,8 @@ "GENUS_SEARCH_SPORTS", "GENUS_BUSINESSMESSAGING", "GENUS_AERIAL_VIEW", - "GENUS_DOCS_FLIX_RENDER" + "GENUS_DOCS_FLIX_RENDER", + "GENUS_SHOPPING" ], "enumDescriptions": [ "", @@ -64387,7 +64238,8 @@ "Genus for Search Sports vertical videos", "Genus for Business Messaging videos", "Genus for Geo Aerial View", - "Genus for Flix Render (Docs)" + "Genus for Flix Render (Docs)", + "Genus for CDS videos processed through Amarna." 
], "type": "string" }, @@ -91252,6 +91104,7 @@ "NOTEBOOKLM", "ZOMBIE_CLOUD", "RELATIONSHIPS", + "APPS_WORKFLOW", "DEPRECATED_QUICKSTART_FLUME", "DUO_CLIENT", "ALBERT", @@ -91908,97 +91761,98 @@ false, false, false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, false, false, false, @@ -92564,6 +92418,7 @@ "Notebooklm Team contact: labs-tailwind-eng-team@google.com", "Zombie Cloud Team contact: zombie-cloud-eng@google.com", "Relationships Team contact: hana-dev@google.com", + "Apps Workflow Team contact: workflows-frontend-eng@google.com", "", "Duo Client Team contact: duo-eng@google.com", "Project albert (go/albert-frontend) Team contact: albert-eng@google.com", @@ -96430,578 +96285,6 @@ }, "type": "object" }, - "PhotosVisionServiceFaceFaceParams": { - "description": "FaceParams are a collection of parameters of a single face found in an image. WARNING: This message has a jspb target. If you add a new message field inside, either put its definition inside this message as well or add the js file corresponding to the new message to the js_deps and proto_js rules in the BUILD file; otherwise it will break lots of builds. The js file name is the message name all in lowercase letters. Next available id: 40.", - "id": "PhotosVisionServiceFaceFaceParams", - "properties": { - "age": { - "description": "The age of the face. Range [0.0, 120.0].", - "format": "float", - "type": "number" - }, - "angerProbability": { - "format": "float", - "type": "number" - }, - "attribute": { - "description": "Attributes for the detected face. Information returned or stored in this message may be sensitive from a privacy, policy, or legal point of view. 
Clients should consult with their p-counsels and the privacy working group (go/pwg) to make sure their use respects Google policies.", - "items": { - "$ref": "HumanSensingFaceAttribute" - }, - "type": "array" - }, - "beardProbability": { - "format": "float", - "type": "number" - }, - "blurredProbability": { - "format": "float", - "type": "number" - }, - "boundingBox": { - "$ref": "PhotosVisionServiceFaceFaceParamsBoundingBox", - "description": "Bounding box around the face. The coordinates of the bounding box are in the original image's scale as returned in ImageParams. The bounding box is computed to \"frame\" the face as a human would expect, and is typically used in UI (e.g. G+ to show circles around detected faces). It is based on the landmarker results." - }, - "darkGlassesProbability": { - "format": "float", - "type": "number" - }, - "detectionConfidence": { - "description": "Confidence is in the range [0,1].", - "format": "float", - "type": "number" - }, - "extendedLandmarks": { - "items": { - "$ref": "PhotosVisionServiceFaceFaceParamsExtendedLandmark" - }, - "type": "array" - }, - "eyesClosedProbability": { - "format": "float", - "type": "number" - }, - "face2cartoonResults": { - "$ref": "ResearchVisionFace2cartoonFace2CartoonResults", - "description": "Attributes of the detected face useful for generating a cartoon version of the face." - }, - "faceCropV8": { - "$ref": "PhotosVisionServiceFaceFaceParamsFaceCropV8" - }, - "fdBoundingBox": { - "$ref": "PhotosVisionServiceFaceFaceParamsBoundingBox", - "description": "This other bounding box is tighter than the previous one, and encloses only the skin part of the face. It is typically used to eliminate the face from any image analysis that looks up the \"amount of skin\" visible in an image (e.g. safesearch content score). It is not based on the landmarker results, just on the initial face detection, hence the 'fd' prefix." - }, - "femaleProbability": { - "description": "Probability is in the range [0,1].", - "format": "float", - "type": "number" - }, - "frontalGazeProbability": { - "format": "float", - "type": "number" - }, - "glassesProbability": { - "format": "float", - "type": "number" - }, - "headwearProbability": { - "format": "float", - "type": "number" - }, - "imageParams": { - "$ref": "PhotosVisionServiceFaceImageParams", - "description": "A copy of the 'image_params' field that is also returned as part of the ExtractFacesReply. It contains the with and height of the image the face extraction was performed on and provides the original frame of reference for the bounding boxes above." - }, - "joyProbability": { - "format": "float", - "type": "number" - }, - "landmarkPositions": { - "items": { - "$ref": "PhotosVisionServiceFaceFaceParamsLandmarkPosition" - }, - "type": "array" - }, - "landmarkingConfidence": { - "format": "float", - "type": "number" - }, - "leftEyeClosedProbability": { - "format": "float", - "type": "number" - }, - "longHairProbability": { - "format": "float", - "type": "number" - }, - "mouthOpenProbability": { - "format": "float", - "type": "number" - }, - "nonHumanProbability": { - "format": "float", - "type": "number" - }, - "panAngle": { - "description": "Yaw angle. Indicates how much leftward/rightward the face is pointing relative to the vertical plane perpendicular to the image. 
Range [-180,180].", - "format": "float", - "type": "number" - }, - "poseMatrix": { - "$ref": "PhotosVisionServiceFaceFaceParamsPoseMatrix" - }, - "pretemplate": { - "format": "byte", - "type": "string" - }, - "qualityScore": { - "description": "A score produced by the Face Quality Scoring Module that indicates overall quality of the face and its relative suitability for using it in conjunction with face recognition for instance. As such, the score predicts the likelihood to recognize a given face correctly. A face recognition client could use the score and a threshold to determine whether to use the face in a face model, or whether to even consider it for recognition.", - "format": "float", - "type": "number" - }, - "rightEyeClosedProbability": { - "format": "float", - "type": "number" - }, - "rollAngle": { - "description": "Roll angle indicates how much clockwise/anti-clockwise the face is rotated relative to the image vertical and about the axis perpendicular to the face. Range [-180,180].", - "format": "float", - "type": "number" - }, - "signature": { - "deprecated": true, - "description": "Deprecated: signature will continue to be used for the pre-1.7 SDK template format typically created by the converter module CNVprec_461. All newer templates created with CNVprec_465 or later will use the repeated 'versioned_signatures' field to store the templates and version info.", - "format": "byte", - "type": "string" - }, - "skinBrightnessProbability": { - "format": "float", - "type": "number" - }, - "sorrowProbability": { - "format": "float", - "type": "number" - }, - "surpriseProbability": { - "format": "float", - "type": "number" - }, - "tiltAngle": { - "description": "Pitch angle. Indicates how much upwards/downwards the face is pointing relative to the image's horizontal plane. Range [-180,180].", - "format": "float", - "type": "number" - }, - "underExposedProbability": { - "format": "float", - "type": "number" - }, - "versionedSignatures": { - "items": { - "$ref": "PhotosVisionServiceFaceVersionedFaceSignature" - }, - "type": "array" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsBoundingBox": { - "id": "PhotosVisionServiceFaceFaceParamsBoundingBox", - "properties": { - "x1": { - "description": "These coordinates are in the same scale as the original image. 0 <= x < width, 0 <= y < height.", - "format": "int32", - "type": "integer" - }, - "x2": { - "format": "int32", - "type": "integer" - }, - "y1": { - "format": "int32", - "type": "integer" - }, - "y2": { - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsExtendedLandmark": { - "description": "Below is the set of extended landmarks added by LMprec_508 and 510. 
All future additional landmarks should be added to this message.", - "id": "PhotosVisionServiceFaceFaceParamsExtendedLandmark", - "properties": { - "id": { - "enum": [ - "NOSE_BOTTOM_RIGHT", - "NOSE_BOTTOM_LEFT", - "NOSE_BOTTOM_CENTER", - "LEFT_EYE_TOP_BOUNDARY", - "LEFT_EYE_RIGHT_CORNER", - "LEFT_EYE_BOTTOM_BOUNDARY", - "LEFT_EYE_LEFT_CORNER", - "RIGHT_EYE_TOP_BOUNDARY", - "RIGHT_EYE_RIGHT_CORNER", - "RIGHT_EYE_BOTTOM_BOUNDARY", - "RIGHT_EYE_LEFT_CORNER", - "LEFT_EYEBROW_UPPER_MIDPOINT", - "RIGHT_EYEBROW_UPPER_MIDPOINT", - "LEFT_EAR_TRAGION", - "RIGHT_EAR_TRAGION", - "LEFT_EYE_PUPIL", - "RIGHT_EYE_PUPIL", - "FOREHEAD_GLABELLA", - "CHIN_GNATHION", - "CHIN_LEFT_GONION", - "CHIN_RIGHT_GONION", - "LEFT_CHEEK_CENTER", - "RIGHT_CHEEK_CENTER", - "UNKNOWN_LANDMARK" - ], - "enumDescriptions": [ - "", - "", - "The following landmark is available with LMprec_508 and later", - "The following landmarks are extracted by LMprec_510 and later. See also documentation at www/~jsteffens/no_crawl/doc/FaceDetection/LM510.pdf", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "The following landmarks are extracted by LMprec_600 and later. See go/facesdk.", - "", - "Reserved id for an unknown landmark. This matches the id reserved by the core SDK for an external UNKNOWN landmark." - ], - "type": "string" - }, - "x": { - "description": "NOTE that landmark positions may fall outside the bounds of the image when the face is near one or more edges of the image. That is, it is NOT guaranteed that 0 <= x < width or 0 <= y < height. Rounded version of x_f.", - "format": "int32", - "type": "integer" - }, - "xF": { - "format": "float", - "type": "number" - }, - "y": { - "description": "Rounded version of y_f.", - "format": "int32", - "type": "integer" - }, - "yF": { - "format": "float", - "type": "number" - }, - "z": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsFaceCropV8": { - "description": "Information defining a FaceCrop for a particular face. See go/on-device-face-grouping-face-crops for more details.", - "id": "PhotosVisionServiceFaceFaceParamsFaceCropV8", - "properties": { - "centerX": { - "description": "The X coordinate of the center of the face crop.", - "format": "float", - "type": "number" - }, - "centerY": { - "description": "The Y coordinate of the center of the face crop.", - "format": "float", - "type": "number" - }, - "rotation": { - "description": "Rotation of the face crop, in radians.", - "format": "float", - "type": "number" - }, - "scale": { - "description": "Scale to apply to the coordinates of the face crop.", - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsLandmarkPosition": { - "id": "PhotosVisionServiceFaceFaceParamsLandmarkPosition", - "properties": { - "landmark": { - "description": "Some landmarks are set during face finding and some are set during landmark finding. 
Only after landmarking will all landmarks be set.", - "enum": [ - "LEFT_EYE", - "RIGHT_EYE", - "LEFT_OF_LEFT_EYEBROW", - "RIGHT_OF_LEFT_EYEBROW", - "LEFT_OF_RIGHT_EYEBROW", - "RIGHT_OF_RIGHT_EYEBROW", - "MIDPOINT_BETWEEN_EYES", - "NOSE_TIP", - "UPPER_LIP", - "LOWER_LIP", - "MOUTH_LEFT", - "MOUTH_RIGHT", - "MOUTH_CENTER", - "DEPRECATED_NOSE_BOTTOM_RIGHT", - "DEPRECATED_NOSE_BOTTOM_LEFT", - "DEPRECATED_NOSE_BOTTOM_CENTER", - "DEPRECATED_LEFT_EYE_TOP_BOUNDARY", - "DEPRECATED_LEFT_EYE_RIGHT_CORNER", - "DEPRECATED_LEFT_EYE_BOTTOM_BOUNDARY", - "DEPRECATED_LEFT_EYE_LEFT_CORNER", - "DEPRECATED_RIGHT_EYE_TOP_BOUNDARY", - "DEPRECATED_RIGHT_EYE_RIGHT_CORNER", - "DEPRECATED_RIGHT_EYE_BOTTOM_BOUNDARY", - "DEPRECATED_RIGHT_EYE_LEFT_CORNER", - "DEPRECATED_LEFT_EYEBROW_UPPER_MIDPOINT", - "DEPRECATED_RIGHT_EYEBROW_UPPER_MIDPOINT", - "DEPRECATED_LEFT_EAR_TRAGION", - "DEPRECATED_RIGHT_EAR_TRAGION", - "DEPRECATED_FOREHEAD_GLABELLA", - "DEPRECATED_CHIN_GNATHION", - "DEPRECATED_CHIN_LEFT_GONION", - "DEPRECATED_CHIN_RIGHT_GONION", - "DEPRECATED_UNKNOWN_LANDMARK" - ], - "enumDeprecated": [ - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true - ], - "enumDescriptions": [ - "Left and right are as viewed in the image without considering mirror projection typical in photos. So LEFT_EYE is typically the person's right eye. For convenience and consistency the enum values mirror the corresponding values defined by the Neven Vision SDK. See landmark table at: wiki/twiki/bin/view/Main/FRSDKLandmarkPositions The following landmarks are extracted by LMprec_502 and later", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "All values below are deprecated. Please use ExtendedLandmark to use them.", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "x": { - "description": "NOTE that landmark positions may fall outside the bounds of the image when the face is near one or more edges of the image. That is, it is NOT guaranteed that 0 <= x < width or 0 <= y < height. Rounded version of x_f.", - "format": "int32", - "type": "integer" - }, - "xF": { - "format": "float", - "type": "number" - }, - "y": { - "description": "Rounded version of y_f.", - "format": "int32", - "type": "integer" - }, - "yF": { - "format": "float", - "type": "number" - }, - "z": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceFaceParamsPoseMatrix": { - "description": "Stores the full pose transformation matrix of the detected face. 
From this the roll, pan, tilt angles can be computed.", - "id": "PhotosVisionServiceFaceFaceParamsPoseMatrix", - "properties": { - "xx": { - "format": "float", - "type": "number" - }, - "xy": { - "format": "float", - "type": "number" - }, - "xz": { - "format": "float", - "type": "number" - }, - "yx": { - "format": "float", - "type": "number" - }, - "yy": { - "format": "float", - "type": "number" - }, - "yz": { - "format": "float", - "type": "number" - }, - "zx": { - "format": "float", - "type": "number" - }, - "zy": { - "format": "float", - "type": "number" - }, - "zz": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceImageParams": { - "description": "ImageParams are a collection of parameters of the image on which face detection was performed.", - "id": "PhotosVisionServiceFaceImageParams", - "properties": { - "height": { - "format": "int32", - "type": "integer" - }, - "width": { - "format": "int32", - "type": "integer" - } - }, - "type": "object" - }, - "PhotosVisionServiceFaceVersionedFaceSignature": { - "description": "From newer SDK versions onward (1.7+), each face template (signature) will also store a version # derived from the converter version that created the template.", - "id": "PhotosVisionServiceFaceVersionedFaceSignature", - "properties": { - "confidence": { - "description": "Confidence score based on embedding uncertainty. This is populated if fetch_facenet_confidence has been set as true in FaceNetConfig, and FaceNet version satisfies one of the following: 1. FACENET_8. 2. FACENET_9 with confidence model enabled in FaceTemplatesConfig. If face_embedding_confidence module is requested, this will also be populated, and the signature will be empty.", - "format": "float", - "type": "number" - }, - "confidenceVersion": { - "description": "The Confidence version that populated the confidence.", - "enum": [ - "EMBEDDING_CONFIDENCE_VERSION_UNSPECIFIED", - "VERSION_1", - "VERSION_2" - ], - "enumDescriptions": [ - "", - "Corresponds to VSSV1DNormTfLiteClient. Regions without an embedding confidence version should be assumed to have this version.", - "Corresponds to AAV2DNorm. This is an animal-aware version with scores compatible with VERSION_1." - ], - "type": "string" - }, - "converterVersion": { - "description": "The converter version that created this template.", - "enum": [ - "UNKNOWN", - "PREC_461", - "PREC_465", - "PREC_470", - "FACENET_7", - "FACENET_8", - "FACENET_CELEBRITY", - "FACENET_9", - "FACENET_9_TPU", - "FACENET_MOBILE_V1_8BITS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "signature": { - "description": "The face template bytes.", - "format": "byte", - "type": "string" - }, - "signatureSource": { - "description": "Specifies the source of the signature in cases where the bytes are from a lower level of the FaceNet architecture. This is useful in combination with the FaceNetClient when it returns multiple outputs and we need to keep track of their contents. For example, this could contain the string 'avgpool-0' while another instance can use the standard 'normalizing' string.", - "type": "string" - }, - "version": { - "description": "The internal version of the template. This is a copy of the version stored within the template.", - "format": "uint32", - "type": "integer" - } - }, - "type": "object" - }, "PornFlagData": { "description": "A protocol buffer to store the url, referer and porn flag for a url. and an optional image score. 
Next available tag id: 51.", "id": "PornFlagData", @@ -100776,7 +100059,7 @@ "type": "object" }, "QualityNavboostCrapsCrapsData": { - "description": "NEXT TAG: 27", + "description": "NEXT TAG: 28", "id": "QualityNavboostCrapsCrapsData", "properties": { "agingCounts": { @@ -100872,6 +100155,11 @@ }, "url": { "type": "string" + }, + "voterTokenCount": { + "description": "The number of distinct voter tokens (a lower bound on the number of distinct users that contributed to the entry, used for privacy-related filtering).", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -100937,6 +100225,10 @@ "signals": { "$ref": "QualityNavboostCrapsCrapsClickSignals", "description": "CRAPS Signals for the locale." + }, + "voterTokenBitmap": { + "$ref": "QualityNavboostGlueVoterTokenBitmapMessage", + "description": "The set of voter tokens of the sessions that contributed to this feature's stats. Voter tokens are not unique per user, so it is a lower bound on the number of distinct users. Used for privacy-related filtering." } }, "type": "object" @@ -101007,6 +100299,24 @@ }, "type": "object" }, + "QualityNavboostGlueVoterTokenBitmapMessage": { + "description": "Used for aggregating query unique voter_token during merging. We use 4 uint64(s) as a 256-bit bitmap to aggregate distinct voter_tokens in Glue model pipeline. Number of elements should always be either 0 or 4. As an optimization, we store the voter_token as a single uint64 if only one bit is set. See quality/navboost/speedy_glue/util/voter_token_bitmap.h for the class that manages operations on these bitmaps.", + "id": "QualityNavboostGlueVoterTokenBitmapMessage", + "properties": { + "subRange": { + "items": { + "format": "uint64", + "type": "string" + }, + "type": "array" + }, + "voterToken": { + "format": "uint64", + "type": "string" + } + }, + "type": "object" + }, "QualityNsrExperimentalNsrTeamData": { "description": "Experimental NsrTeam data. This is a proto containing versioned signals which can be used to run live experiments. This proto will not be propagated to MDU shards, but it will be populated at query time by go/web-signal-joins inside the CompressedQualitySignals subproto of PerDocData proto. See go/0DayLEs for the design doc. Note how this is only meant to be used during LEs, it should *not* be used for launches.", "id": "QualityNsrExperimentalNsrTeamData", @@ -101136,7 +100446,7 @@ "type": "object" }, "QualityNsrNsrData": { - "description": "NOTE: When adding a new field to be propagated to Raffia check if NsrPatternSignalSpec needs to be updated. Next ID: 54", + "description": "NOTE: When adding a new field to be propagated to Raffia check if NsrPatternSignalSpec needs to be updated. Next ID: 55", "id": "QualityNsrNsrData", "properties": { "articleScore": { @@ -101340,6 +100650,11 @@ "format": "float", "type": "number" }, + "smallPersonalSite": { + "description": "Score of small personal site promotion go/promoting-personal-blogs-v1", + "format": "float", + "type": "number" + }, "spambrainLavcScore": { "deprecated": true, "description": "The SpamBrain LAVC score, as of July 2022. See more information at go/cloverfield-lavc-deck.", @@ -104252,7 +103567,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -104261,7 +103577,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. 
See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" } @@ -104479,7 +103796,8 @@ "NAME_CORRECTION_LOG", "FUZZY_CONTACT_MATCH", "NEURAL_CONTACT_MATCH", - "NEURAL_CONTACT_MATCH_DARK_LAUNCH" + "NEURAL_CONTACT_MATCH_DARK_LAUNCH", + "PERSONALIZED_NAME_CORRECTION_LOG" ], "enumDescriptions": [ "", @@ -104488,7 +103806,8 @@ "Alternate name from contact correction history.", "Fuzzy match with user's contacts.", "Neural match. See go/phonetic-contact-match.", - "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it." + "The dark launch for a neural match. We found a match, but we ignore it for serving and just log it.", + "Personalized alternate name from Assistant User Profile that stores personalized contact name corrections under ContactAlternates profile." ], "type": "string" }, @@ -105895,11 +105214,6 @@ "format": "uint64", "type": "string" }, - "productPopularity": { - "description": "Organic product popularity.", - "format": "double", - "type": "number" - }, "relevanceEmbedding": { "description": "Relevance embedding from ShoppingAnnotation.Product", "items": { @@ -107369,20 +106683,6 @@ }, "type": "object" }, - "ReneFaceResponse": { - "description": "The output of the face recognition signal.", - "id": "ReneFaceResponse", - "properties": { - "faces": { - "description": "Recognized faces in the image.", - "items": { - "$ref": "PhotosVisionServiceFaceFaceParams" - }, - "type": "array" - } - }, - "type": "object" - }, "RepositoryAnnotationsGeoTopic": { "description": "GeoTopicality of a document is a set of GeoTopics ordered by their normalized scores.", "id": "RepositoryAnnotationsGeoTopic", @@ -111160,16 +110460,6 @@ "$ref": "RepositoryWebrefSubSegmentIndex", "description": "Identifies the sub-segment where the annotation occurs. See SubSegmentIndex for details. Not present in QRef, also deprecated for URL segment types." }, - "timeOffsetConfidence": { - "description": "Confidence for the time_offset_ms annotation, quantized to values in range 0-127 (see speech::VideoASRServerUtil::ConfidenceQuantize for how the quantization was done). Confidence can be empty for special characters (e.g. spaces).", - "format": "int32", - "type": "integer" - }, - "timeOffsetMs": { - "description": "Timestamp that this mention appeared in the video. The field is only populated for VIDEO_TRANSCRIPT when the byte offset is the same. It is extracted from cdoc.doc_videos.content_based_metadata.transcript_asr.transcript.timestamp.", - "format": "int32", - "type": "integer" - }, "trustedNameConfidence": { "description": "Confidence that this name is a trusted name of the entity. This is set only in case the confidence is higher than an internal threshold (see ConceptProbability).", "format": "float", @@ -112897,10 +112187,6 @@ }, "webrefOutlinkInfos": { "$ref": "RepositoryWebrefWebrefOutlinkInfos" - }, - "webrefOutlinksLegacy": { - "$ref": "Proto2BridgeMessageSet", - "deprecated": true } }, "type": "object" @@ -114346,10 +113632,6 @@ "$ref": "Proto2BridgeMessageSet", "description": "Optional extensions (e.g. taxonomic classifications)." 
}, - "outlinkInfos": { - "$ref": "RepositoryWebrefWebrefOutlinkInfos", - "description": "Information about the outlinks of this document. " - }, "webrefParsedContentSentence": { "description": "The content (CONTENT section 0) as parsed by WebrefParser. Only used by //r/w/postprocessing/idf/idf-pipeline for document ngram idf computation. Populated when the annotator is run with webref_populate_parsed_content Each webref_parsed_content_sentence represents one sentence of the context where saft annotations were used to determine the sentence boundaries. See r/w/universal/processors/saft/saft-sentence-helper.h for details.", "items": { @@ -116503,961 +115785,6 @@ }, "type": "object" }, - "ResearchVisionFace2cartoonAgeClassifierResults": { - "id": "ResearchVisionFace2cartoonAgeClassifierResults", - "properties": { - "age": { - "enum": [ - "UNKNOWN", - "BABY", - "KID", - "ADULT", - "OLD" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - }, - "predictedAge": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonChinLengthClassifierResults": { - "id": "ResearchVisionFace2cartoonChinLengthClassifierResults", - "properties": { - "chinLength": { - "enum": [ - "UNKNOWN", - "SHORT", - "AVERAGE", - "LONG" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - }, - "confidence": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeColorClassifierResults": { - "id": "ResearchVisionFace2cartoonEyeColorClassifierResults", - "properties": { - "color": { - "enum": [ - "UNKNOWN", - "BROWN_OR_BLACK", - "BLUE_OR_GREEN" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - }, - "confidence": { - "format": "float", - "type": "number" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyeEyebrowDistance": { - "enum": [ - "UNKNOWN", - "SMALL", - "AVERAGE", - "LARGE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeShapeClassifierResults": { - "id": "ResearchVisionFace2cartoonEyeShapeClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "shape": { - "enum": [ - "UNKNOWN", - "DOUBLE_FOLD_EYELID", - "SINGLE_FOLD_EYELID" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeSlantClassifierResults": { - "id": "ResearchVisionFace2cartoonEyeSlantClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyeSlant": { - "enum": [ - "UNKNOWN", - "OUTWARDS", - "AVERAGE", - "INWARDS" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults", - 
"properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyeVerticalPosition": { - "enum": [ - "UNKNOWN", - "HIGH", - "AVERAGE", - "LOW" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyebrowShapeClassifierResults": { - "id": "ResearchVisionFace2cartoonEyebrowShapeClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyebrowShape": { - "enum": [ - "UNKNOWN", - "ST_BREAK", - "ST_BEND", - "HIGH_DIAGONAL", - "TILT", - "ROUND", - "ANGULAR", - "HIGH_CURVY", - "ROUND_UNEVEN", - "BUSHY_ST", - "UNI" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyebrowThicknessClassifierResults": { - "id": "ResearchVisionFace2cartoonEyebrowThicknessClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyebrowThickness": { - "enum": [ - "UNKNOWN", - "THIN", - "NORMAL", - "THICK", - "VERY_THICK" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonEyebrowWidthClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonEyebrowWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "eyebrowWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonFace2CartoonResults": { - "description": "Results of the Face2Cartoon pipeline.", - "id": "ResearchVisionFace2cartoonFace2CartoonResults", - "properties": { - "ageClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonAgeClassifierResults" - }, - "type": "array" - }, - "chinLengthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonChinLengthClassifierResults" - }, - "type": "array" - }, - "eyeColorClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeColorClassifierResults" - }, - "type": "array" - }, - "eyeEyebrowDistanceClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeEyebrowDistanceClassifierResults" - }, - "type": "array" - }, - "eyeShapeClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeShapeClassifierResults" - }, - "type": "array" - }, - "eyeSlantClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeSlantClassifierResults" - }, - "type": "array" - }, - "eyeVerticalPositionClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyeVerticalPositionClassifierResults" - }, - "type": "array" - }, - "eyebrowShapeClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyebrowShapeClassifierResults" - }, - "type": "array" - }, - "eyebrowThicknessClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyebrowThicknessClassifierResults" - }, - "type": "array" - }, - "eyebrowWidthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonEyebrowWidthClassifierResults" - }, - "type": "array" - }, - "faceWidthClassifierResults": { - "items": { - "$ref": 
"ResearchVisionFace2cartoonFaceWidthClassifierResults" - }, - "type": "array" - }, - "facialHairClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonFacialHairClassifierResults" - }, - "type": "array" - }, - "genderClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonGenderClassifierResults" - }, - "type": "array" - }, - "glassesClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonGlassesClassifierResults" - }, - "type": "array" - }, - "hairColorClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonHairColorClassifierResults" - }, - "type": "array" - }, - "hairStyleClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonHairStyleClassifierResults" - }, - "type": "array" - }, - "interEyeDistanceClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonInterEyeDistanceClassifierResults" - }, - "type": "array" - }, - "jawShapeClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonJawShapeClassifierResults" - }, - "type": "array" - }, - "lipThicknessClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonLipThicknessClassifierResults" - }, - "type": "array" - }, - "mouthVerticalPositionClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults" - }, - "type": "array" - }, - "mouthWidthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonMouthWidthClassifierResults" - }, - "type": "array" - }, - "noseVerticalPositionClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults" - }, - "type": "array" - }, - "noseWidthClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonNoseWidthClassifierResults" - }, - "type": "array" - }, - "skinToneClassifierResults": { - "items": { - "$ref": "ResearchVisionFace2cartoonSkinToneClassifierResults" - }, - "type": "array" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonFaceWidthClassifierResults": { - "id": "ResearchVisionFace2cartoonFaceWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "faceWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonFacialHairClassifierResults": { - "id": "ResearchVisionFace2cartoonFacialHairClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "facialHair": { - "enum": [ - "UNKNOWN", - "NO_FACIAL_HAIR", - "CLOSE_SHAVE", - "SHORT_BEARD_2", - "SHORT_BEARD_1", - "MED_BEARD", - "SHORT_BEARD_5", - "GOATEE", - "MOUSTACHE", - "MOUSTACHE_GOATEE" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonGenderClassifierResults": { - "id": "ResearchVisionFace2cartoonGenderClassifierResults", - "properties": { - "confidence": { - "description": "Uses a scaled version of the FaceSDK classifier's probability as the confidence (since the probability for the selected gender is between (0.5, 1] we scale it to be between (0, 1]).", - "format": "float", - "type": "number" - }, - "gender": { - "enum": [ - "UNKNOWN", - "FEMALE", - "MALE" - ], - "enumDescriptions": [ - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonGlassesClassifierResults": { - 
"id": "ResearchVisionFace2cartoonGlassesClassifierResults", - "properties": { - "confidence": { - "description": "Uses a scaled version of the FaceSDK classifier's probability as the confidence (since the probability for the selected glasses is between (0.5, 1] we scale it to be between (0, 1]).", - "format": "float", - "type": "number" - }, - "glassesType": { - "enum": [ - "UNKNOWN", - "NO_GLASSES", - "GLASSES", - "DARK_GLASSES" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonHairColorClassifierResults": { - "id": "ResearchVisionFace2cartoonHairColorClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "hairColor": { - "enum": [ - "UNKNOWN", - "BLACK", - "DARK_BROWN", - "LIGHT_BROWN", - "AUBURN", - "ORANGE", - "STRAWBERRY_BLONDE", - "DIRTY_BLONDE", - "BLEACHED_BLONDE", - "GREY", - "WHITE", - "MINT", - "PALE_PINK", - "LAVENDER", - "TEAL", - "PURPLE", - "PINK", - "BLUE", - "GREEN" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonHairStyleClassifierResults": { - "id": "ResearchVisionFace2cartoonHairStyleClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "hairStyle": { - "enum": [ - "UNKNOWN", - "BALD_1", - "BALD_2", - "BALD_3", - "SHAVE_1", - "FRONT_CREW_1", - "SHORT_STRAIGHT_9", - "SHORT_STRAIGHT_17", - "BUN_1", - "SHORT_STRAIGHT_2", - "SHORT_STRAIGHT_10", - "SHORT_STRAIGHT_1", - "SHORT_STRAIGHT_19", - "SHORT_STRAIGHT_4", - "SHORT_STRAIGHT_20", - "SHORT_STRAIGHT_18", - "SHORT_STRAIGHT_11", - "MEDIUM_STRAIGHT_5", - "MEDIUM_STRAIGHT_6", - "MEDIUM_STRAIGHT_3", - "LONG_STRAIGHT_6", - "LONG_STRAIGHT_4", - "LONG_STRAIGHT_2", - "LONG_STRAIGHT_PONYTAIL_2", - "LONG_STRAIGHT_PONYTAIL_1", - "SHORT_WAVY_2", - "MEDIUM_WAVY_1", - "MEDIUM_WAVY_4", - "MEDIUM_WAVY_2", - "LONG_WAVY_1", - "LONG_WAVY_3", - "LONG_WAVY_2", - "LONG_WAVY_4", - "LONG_WAVY_PONYTAIL_4", - "SHORT_CURLY_6", - "SHORT_CURLY_5", - "MEDIUM_CURLY_3", - "SHORT_CURLY_8", - "MEDIUM_CURLY_4", - "LONG_CURLY_3", - "LONG_CURLY_1", - "LONG_CURLY_5", - "LONG_CURLY_4", - "LONG_CURLY_2", - "LONG_CURLY_PONYTAIL_1", - "SHORT_COILY_1", - "SHORT_COILY_5", - "SHORT_COILY_4", - "SHORT_COILY_2", - "MEDIUM_COILY_1", - "LONG_COILY_2", - "LONG_COILY_PONYTAIL_1", - "SHORT_COILY_3", - "LONG_COILY_1", - "BOX_BRAIDS", - "BUN_2", - "COILY_PONYTAIL", - "LONG_COILY_3", - "LONG_COILY_4", - "LONG_COILY_5", - "LONG_COILY_PONYTAIL", - "OTT", - "SHORT_CORNROWS", - "TIGHT_BRAID", - "TIGHT_BRAIDS" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonInterEyeDistanceClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonInterEyeDistanceClassifierResults", - "properties": { - "confidence": { - 
"format": "float", - "type": "number" - }, - "interEyeDistance": { - "enum": [ - "UNKNOWN", - "WIDE", - "AVERAGE", - "CLOSE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonJawShapeClassifierResults": { - "id": "ResearchVisionFace2cartoonJawShapeClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "jawShape": { - "enum": [ - "UNKNOWN", - "TRIANGLE", - "OVAL", - "SQUARE", - "ROUND" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonLipThicknessClassifierResults": { - "id": "ResearchVisionFace2cartoonLipThicknessClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "lipThickness": { - "enum": [ - "UNKNOWN", - "THIN", - "AVERAGE", - "THICK" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonMouthVerticalPositionClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "mouthVerticalPosition": { - "enum": [ - "UNKNOWN", - "HIGH", - "AVERAGE", - "LOW" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonMouthWidthClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonMouthWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "mouthWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonNoseVerticalPositionClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "noseVerticalPosition": { - "enum": [ - "UNKNOWN", - "HIGH", - "AVERAGE", - "LOW" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonNoseWidthClassifierResults": { - "description": "The measurement underlying this assumes fixed ear positions, so applying this combined with the FaceWidthClassifierResults may have an unintended outcome.", - "id": "ResearchVisionFace2cartoonNoseWidthClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": "number" - }, - "noseWidth": { - "enum": [ - "UNKNOWN", - "NARROW", - "AVERAGE", - "WIDE" - ], - "enumDescriptions": [ - "", - "", - "", - "" - ], - "type": "string" - } - }, - "type": "object" - }, - "ResearchVisionFace2cartoonSkinToneClassifierResults": { - "id": "ResearchVisionFace2cartoonSkinToneClassifierResults", - "properties": { - "confidence": { - "format": "float", - "type": 
"number" - }, - "skinToneType": { - "enum": [ - "UNKNOWN", - "TYPE_1", - "TYPE_2", - "TYPE_3", - "TYPE_4", - "TYPE_5", - "TYPE_6", - "TYPE_7", - "TYPE_8", - "TYPE_9", - "TYPE_10", - "TYPE_11" - ], - "enumDescriptions": [ - "See the images from the links at: https://storage.googleapis.com/cc_8e814306-f840-4e2e-9415-36b06251cf8e/ skin_tone_exemplars/skin-*.png", - "(darkest) RGB: #603d30", - "RGB: #88594b", - "RGB: #aa7454", - "RGB: #c78b5d", - "RGB: #d9a16e", - "RGB: #e3b47e", - "RGB: #eeaf94", - "RGB: #f0c092", - "RGB: #f6d8c1", - "RGB: #fbcdb6", - "(lightest) RGB: #fbdbd1" - ], - "type": "string" - } - }, - "type": "object" - }, "RichsnippetsDataObject": { "description": "Next ID: 11", "id": "RichsnippetsDataObject", @@ -125750,7 +124077,7 @@ "type": "object" }, "TrawlerFetchReplyData": { - "description": "Fetcher -> FetchClient FetchReplyData is the metadata for a reply from a FetchRequest. For metadata + document body, FetchReply is further below. NOTE: FetchReplyData (and FetchReply) is the output interface from Multiverse. Teams outside Multiverse/Trawler should not create fake FetchReplies. Trawler: When adding new fields here, it is recommended that at least the following be rebuilt and pushed: - cron_fetcher_index mapreduces: so that UrlReplyIndex, etc. retain the new fields - tlookup, tlookup_server: want to be able to return the new fields - logviewer, fetchutil: annoying to get back 'tag88:' in results -------------------------- Next Tag: 124 -----------------------", + "description": "Fetcher -> FetchClient FetchReplyData is the metadata for a reply from a FetchRequest. For metadata + document body, FetchReply is further below. NOTE: FetchReplyData (and FetchReply) is the output interface from Multiverse. Teams outside Multiverse/Trawler should not create fake FetchReplies. Trawler: When adding new fields here, it is recommended that at least the following be rebuilt and pushed: - cron_fetcher_index mapreduces: so that UrlReplyIndex, etc. retain the new fields - tlookup, tlookup_server: want to be able to return the new fields - logviewer, fetchutil: annoying to get back 'tag88:' in results -------------------------- Next Tag: 125 -----------------------", "id": "TrawlerFetchReplyData", "properties": { "BadSSLCertificate": { @@ -126142,6 +124469,9 @@ "The context of refresh crawl is that client needs to check the content of some URLs periodically, so they refresh those URLs regularly." ], "type": "string" + }, + "webioInfo": { + "$ref": "TrawlerFetchReplyDataWebIOInfo" } }, "type": "object" @@ -127010,6 +125340,34 @@ }, "type": "object" }, + "TrawlerFetchReplyDataWebIOInfo": { + "description": "WebIO is the new hostload model introduced in 2023. 
It measures the occupancy of 1 outgoing fetch connection for 1 minute.", + "id": "TrawlerFetchReplyDataWebIOInfo", + "properties": { + "webio": { + "format": "float", + "type": "number" + }, + "webioPercentageTier": { + "enum": [ + "WEBIO_TIER_1", + "WEBIO_TIER_2", + "WEBIO_TIER_3", + "WEBIO_TIER_4", + "WEBIO_NUM_TIERS" + ], + "enumDescriptions": [ + "Utilization 90-100%", + "Utilization 70%-90%", + "Utilization 30%-70%", + "Utilization 0%-30%", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "TrawlerFetchStatus": { "id": "TrawlerFetchStatus", "properties": { @@ -128835,6 +127193,9 @@ "MULTILINE_SUBSCRIPTION_ADDON_TITLE_SESSION_LEVEL", "PAYTM_WALLET_FAILURE_SESSION_LEVEL", "CART_ABANDONMENT_SUBSCRIPTION_BENEFITS_SESSION_LEVEL_V2", + "MULTILINE_SUBSCRIPTION_BASIC_RESTORE_ENABLED_SESSION_LEVEL", + "DECLINE_MESSAGE_IN_SUBSCENTER_FIX_FLOW_SESSION_LEVEL_V1", + "SAVE_FOR_LATER_CART_ABANDONMENT_SCREEN_SESSION_LEVEL", "SESSION_LEVEL_TEST_CODE_LIMIT", "CART_ABANDONMENT_USER_LEVEL", "IN_APP_PRODUCTS_IN_DETAILS_PAGE_USER_LEVEL", @@ -129047,12 +127408,7 @@ "HAS_MONETIZATION_BEHAVIOR_LAST_180D_USER_LEVEL", "HAS_LAST_28D_CART_ABANDONMENT_USER_LEVEL", "HAS_LAST_7D_CART_ABANDONMENT_USER_LEVEL", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V2", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_1", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_2", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_3", - "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_4", + "LINK_BIOMETRICS_NEW_SETUP_USER_LEVEL_V3_5", "POST_SUCCESS_ADD_BACKUP_FLOW_USER_LEVEL", "SKIP_CHECK_MARK_SCREEN_WITH_BACKUP_FLOW_USER_LEVEL", "IS_ELIGIBLE_FOR_ONE_CLICK_BACKUP_FOP_USER_LEVEL", @@ -129199,6 +127555,9 @@ "UNIFIED_ITEM_RECOMMENDATION_LOWER_PRICED_USER_LEVEL", "CART_WITH_BROKEN_FOP_USER_LEVEL", "CART_ABANDONMENT_SUBSCRIPTION_BENEFITS_USER_LEVEL_V2", + "DECLINE_MESSAGE_IN_SUBSCENTER_FIX_FLOW_USER_LEVEL", + "MULTILINE_SUBSCRIPTION_BASIC_RESTORE_ENABLED_USER_LEVEL", + "SAVE_FOR_LATER_CART_ABANDONMENT_SCREEN_USER_LEVEL", "USER_LEVEL_TEST_CODE_LIMIT" ], "enumDeprecated": [ @@ -129696,256 +128055,59 @@ false, false, false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - 
false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, - false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, false, false, false, @@ -130176,6 +128338,89 @@ false, false, false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, true, false, false, @@ -130290,6 +128535,68 @@ false, false, false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, false, false, false, @@ -130405,7 +128712,6 @@ false, false, false, - true, false, false, false, @@ -130433,6 +128739,57 @@ false, false, false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, true, false, false, @@ -130578,6 +128935,9 @@ false, false, false, + false, + false, + false, false ], "enumDescriptions": [ @@ -131593,6 +129953,9 @@ "Session-level test code for multiline addon title.", 
"Session-level test code for Paytm wallet failures.", "Session-level for showing subscription benefits in cart abandonment.", + "Session_level test code for multiline basic restore enabled.", + "Session-level test code thst indicates decline message is popluated in subscenter.", + "Session level test code for Save For Later cart abandonment screen.", "", "Cart abandonment flow for purchase flow.", "User saw/would have seen the in app products section in App", @@ -131805,11 +130168,6 @@ "User level test code for users who have made any monetization behavior(sub, iap) for the last 180 days (controlled by ULYSSES_OOP_SPEND_PER_PURCHASE_180D), used for AH/GH monetization experiments.", "User level test code for users who have any purchase card abandon behavior in the last 28 day (controlled by LAST_28D_CART_ABANDONMENT_BACKEND), used for AH/GH monetization experiments.", "User level test code for users who have any purchase card abandon behavior in the last 7 day (controlled by LAST_7D_CART_ABANDONMENT_BACKEND), used for AH/GH monetization experiments.", - "User level test code for link biometrics with impression cap and foped user setup.", - "User level test code for link biometrics with impression cap and foped user setup.", - "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", - "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", - "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", "User level test code for link biometrics with impression cap and foped user setup after traffic rebalancing.", "User level test code for post success add backup flow.", "User level test code for skipping ckechmark screen with backup flow.", @@ -131876,7 +130234,7 @@ "", "", "", - "User level test code for reinstall enablement. If user has any eligible reinstall passing the per user filtering logic, testcode will be logged. Note that the filtering logics are controlled by gcl flags. Ex. Play Games Home: http://shortn/_2aGCRQqToq. This test code only knows if any app passes the filtering but not which filtering params are applied.", + "User level test code for reinstall enablement. If user has any eligible reinstall passing the per user filtering logic, testcode will be logged. Note that the filtering logic are controlled by gcl flags. Ex. Play Games Home: http://shortn/_2aGCRQqToq. This test code only knows if any app passes the filtering but not which filtering params are applied.", "User level test code for tagging users who have any app which is recommended by PRS and has reinstall eligibility when is_app_with_historical_oop_purchase restriction is turned on.", "User-level test code for tagging users with previous OOP spend on games.", "User-level test code for tagging users with previous OOP spend on applications.", @@ -131957,6 +130315,9 @@ "", "User level test code indicating that user starts the purchase with a cart that has broken existing form of payment.", "User-level for showing subscription benefits in cart abandonment.", + "User-level test code for users who see the decline message in subscenter.", + "User level test code for multiline basic restore enabled.", + "User level test code for Save For Later cart abandonment screen. 
Add new user-level TestCode here.", "" ], "type": "string" @@ -132423,7 +130784,8 @@ "NS_SEARCH_SPORTS", "NS_BUSINESSMESSAGING", "NS_AERIAL_VIEW", - "NS_DOCS_FLIX_RENDER" + "NS_DOCS_FLIX_RENDER", + "NS_SHOPPING" ], "enumDeprecated": [ false, @@ -132468,6 +130830,7 @@ false, false, false, + false, false ], "enumDescriptions": [ @@ -132513,7 +130876,8 @@ "Namespace for Search Sports vertical videos.", "Namespace for Business Messaging videos.", "Namespace for Geo Aerial View", - "Namespace for Flix Render (Docs) Please receive approval via go/vp-newclients before adding a new namespace." + "Namespace for Flix Render (Docs)", + "Namespace for CDS videos processed through Amarna. Please receive approval via go/vp-newclients before adding a new namespace." ], "type": "string" } @@ -148987,7 +147351,9 @@ "COUNTERFEIT", "COURT_ORDER", "CTM", + "DANGEROUS", "DEFAMATION", + "EATING_DISORDERS", "GOVERNMENT_ORDER", "HARASSMENT", "HATE", @@ -149003,17 +147369,17 @@ "QUOTA_EXCEEDED", "REGULATED", "SPAM", + "SUICIDE_AND_SELF_HARM", "TRADEMARK", "UNSAFE_RACY", "UNWANTED_SOFTWARE", "UNWANTED_CONTENT", "VIOLENCE", - "DANGEROUS", + "VIOLENT_EXTREMISM", "BLOCKED_LINKS", "BLOCKED_WORDS", "ENABLED_HOLD_ALL", "HIDDEN_USER_LIST", - "VIOLENT_EXTREMISM", "PRIVILEGED_USER_REJECTED", "ABOVE_REJECT_INAPPROPRIATE_SCORE", "TOO_MANY_BAD_CHARS", @@ -149027,7 +147393,9 @@ "Promotion of counterfeit product claims.", "Third-party court orders.", "Circumvention of Technological measures claims. Circumventing protection mechanisms on copyrighted work.", + "Content depicts or provides instructions to complete activities that are dangerous and/or widely illegal, e.g. prostitution, bomb-making, suicide.", "Defamation claims.", + "Content that demonstrates eating disorders", "Government request, regardless of reason.", "Consistent harassing behavior directed towards a person.", "", @@ -149043,17 +147411,17 @@ "", "Contains regulated products and services, such as pharmaceuticals, alcohol, tobacco, etc. For details, https://sites.google.com/a/google.com/crt-policy-site/regulated", "", + "Content that demonstrates suicide and self harm", "Trademark violations where Google could be liable.", "Content that is unsafe because it is sexually suggestive/racy.", "The software is deceptive, promising a value proposition that it does not meet, or tries to trick users into installing it or it piggybacks on the installation of another program, or doesn\u2019t tell the user about all of its principal and significant functions or affects the user\u2019s system in unexpected ways, or collects or transmits private information without the user\u2019s knowledge, or bundled with other software and its presence is not disclosed.", "Content includes spammy commercial content, such as links to MFA pages, affiliate links, ads or solicitation, or otherwise off-topic or irrelevant content.", "", - "Content depicts or provides instructions to complete activities that are dangerous and/or widely illegal, e.g. prostitution, bomb-making, suicide.", + "Content that recruits or solicits terrorists; specific and detailed instructions on how to make a bomb; terrorists who document their attacks; praising acts of mass violence; content that shows captured hostages posted with the intent to solicit demands, threaten, or intimidate.", "Comment contains links in a list of \"blocked links\" in YouTube Studio > Settings > Community.", "Creator setting specific reasons. 
go/ytc-nextgen-community-settings-storage Comment contains words in a list of \"blocked words\" in YouTube Studio > Settings > Community.", "Held because the moderation policy is \"Hold all comments for review\".", "Comment from listed hidden users.", - "Content that recruits or solicits terrorists; specific and detailed instructions on how to make a bomb; terrorists who document their attacks; praising acts of mass violence; content that shows captured hostages posted with the intent to solicit demands, threaten, or intimidate.", "A privileged user, which can only be parent entity owner for ENTITY_COMMENT, but can be either parent entity channel owner or channel moderator for CHAT_MESSAGE CommentType, manually rejected the Comment. Their decision overrides any system flagging.", "Automod rejected due to above inappropriate score rejection threshold. Maps to ModerationReason.ABOVE_REJECT_INAPPROPRIATE_SCORE.", "Automod rejected due to containing more than allowed bad characters. Maps to ModerationReason.TOO_MANY_BAD_CHARS.", diff --git a/googleapiclient/discovery_cache/documents/customsearch.v1.json b/googleapiclient/discovery_cache/documents/customsearch.v1.json index bfa1c42ab37..1c2a3ce5c86 100644 --- a/googleapiclient/discovery_cache/documents/customsearch.v1.json +++ b/googleapiclient/discovery_cache/documents/customsearch.v1.json @@ -688,7 +688,7 @@ } } }, - "revision": "20231021", + "revision": "20231025", "rootUrl": "https://customsearch.googleapis.com/", "schemas": { "Promotion": { diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1.json index b1bf65ccf0a..81da9a14c60 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1.json @@ -2144,7 +2144,7 @@ } } }, - "revision": "20231013", + "revision": "20231025", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json index 90f1c32adba..32c45490586 100644 --- a/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datacatalog.v1beta1.json @@ -1813,7 +1813,7 @@ } } }, - "revision": "20231013", + "revision": "20231025", "rootUrl": "https://datacatalog.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json index 548877c8889..24431288693 100644 --- a/googleapiclient/discovery_cache/documents/dataflow.v1b3.json +++ b/googleapiclient/discovery_cache/documents/dataflow.v1b3.json @@ -2221,7 +2221,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -3932,6 +3932,11 @@ "$ref": "RuntimeUpdatableParams", "description": "This field may ONLY be modified at runtime using the projects.jobs.update method to adjust job behavior. This field has no effect when specified at job creation." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "Reserved for future use. 
This field is set only in responses from the server; it is ignored if it is set in any requests.", "type": "boolean" diff --git a/googleapiclient/discovery_cache/documents/datalineage.v1.json b/googleapiclient/discovery_cache/documents/datalineage.v1.json index d2e7f11332c..0361dc6f27b 100644 --- a/googleapiclient/discovery_cache/documents/datalineage.v1.json +++ b/googleapiclient/discovery_cache/documents/datalineage.v1.json @@ -798,7 +798,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://datalineage.googleapis.com/", "schemas": { "GoogleCloudDatacatalogLineageV1BatchSearchLinkProcessesRequest": { diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json index f5ce8f460f2..86a5d3ec19d 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json @@ -2097,7 +2097,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AlloyDbConnectionProfile": { diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json index fb250273ad5..9b1e11fb3f5 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json @@ -1049,7 +1049,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/datapipelines.v1.json b/googleapiclient/discovery_cache/documents/datapipelines.v1.json index 0d117c85388..7f397a212f4 100644 --- a/googleapiclient/discovery_cache/documents/datapipelines.v1.json +++ b/googleapiclient/discovery_cache/documents/datapipelines.v1.json @@ -369,7 +369,7 @@ } } }, - "revision": "20231008", + "revision": "20231022", "rootUrl": "https://datapipelines.googleapis.com/", "schemas": { "GoogleCloudDatapipelinesV1DataflowJobDetails": { diff --git a/googleapiclient/discovery_cache/documents/dataplex.v1.json b/googleapiclient/discovery_cache/documents/dataplex.v1.json index a3de5f5e675..07702ba54e6 100644 --- a/googleapiclient/discovery_cache/documents/dataplex.v1.json +++ b/googleapiclient/discovery_cache/documents/dataplex.v1.json @@ -4360,7 +4360,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://dataplex.googleapis.com/", "schemas": { "Empty": { @@ -5555,7 +5555,7 @@ "id": "GoogleCloudDataplexV1DataQualityResult", "properties": { "dimensions": { - "description": "A list of results at the dimension level.", + "description": "A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it.", "items": { "$ref": "GoogleCloudDataplexV1DataQualityDimensionResult" }, diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index e93e3d5708f..1b8436bcccd 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -3006,7 +3006,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -4252,6 +4252,17 @@ }, "type": 
"object" }, + "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig": { + "description": "Encryption settings for the encrypting customer core content. NEXT ID: 2", + "id": "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig", + "properties": { + "kmsKey": { + "description": "Optional. The Cloud KMS key name to use for encrypting customer core content.", + "type": "string" + } + }, + "type": "object" + }, "HadoopJob": { "description": "A Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).", "id": "HadoopJob", @@ -5178,6 +5189,14 @@ }, "readOnly": true, "type": "array" + }, + "unreachable": { + "description": "Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" } }, "type": "object" @@ -5411,14 +5430,16 @@ "CREATE", "UPDATE", "DELETE", - "RESIZE" + "RESIZE", + "REPAIR" ], "enumDescriptions": [ "Node group operation type is unknown.", "Create node group operation type.", "Update node group operation type.", "Delete node group operation type.", - "Resize node group operation type." + "Resize node group operation type.", + "Repair node group operation type." ], "type": "string" }, @@ -6915,6 +6936,15 @@ "description": "Usage metrics represent approximate total resources consumed by a workload.", "id": "UsageMetrics", "properties": { + "acceleratorType": { + "description": "Optional. Accelerator type being used, if any", + "type": "string" + }, + "milliAcceleratorSeconds": { + "description": "Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).", + "format": "int64", + "type": "string" + }, "milliDcuSeconds": { "description": "Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).", "format": "int64", @@ -6932,6 +6962,15 @@ "description": "The usage snapshot represents the resources consumed by a workload at a specified time.", "id": "UsageSnapshot", "properties": { + "acceleratorType": { + "description": "Optional. Accelerator type being used, if any", + "type": "string" + }, + "milliAccelerator": { + "description": "Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing))", + "format": "int64", + "type": "string" + }, "milliDcu": { "description": "Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).", "format": "int64", @@ -7171,6 +7210,10 @@ "format": "google-duration", "type": "string" }, + "encryptionConfig": { + "$ref": "GoogleCloudDataprocV1WorkflowTemplateEncryptionConfig", + "description": "Optional. Encryption settings for the encrypting customer core content." 
+ }, "id": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/datastore.v1.json b/googleapiclient/discovery_cache/documents/datastore.v1.json index bb60b59b988..5a411f01e16 100644 --- a/googleapiclient/discovery_cache/documents/datastore.v1.json +++ b/googleapiclient/discovery_cache/documents/datastore.v1.json @@ -654,7 +654,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "Aggregation": { diff --git a/googleapiclient/discovery_cache/documents/datastore.v1beta1.json b/googleapiclient/discovery_cache/documents/datastore.v1beta1.json index 951adaf7a21..28da4cda5f1 100644 --- a/googleapiclient/discovery_cache/documents/datastore.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datastore.v1beta1.json @@ -168,7 +168,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "GoogleDatastoreAdminV1CommonMetadata": { diff --git a/googleapiclient/discovery_cache/documents/datastore.v1beta3.json b/googleapiclient/discovery_cache/documents/datastore.v1beta3.json index 4bdae823e94..40aacf3dcaf 100644 --- a/googleapiclient/discovery_cache/documents/datastore.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/datastore.v1beta3.json @@ -336,7 +336,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "Aggregation": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2.json b/googleapiclient/discovery_cache/documents/dialogflow.v2.json index 585e38687cf..e42b968215c 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v2.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v2.json @@ -8327,7 +8327,7 @@ } } }, - "revision": "20231019", + "revision": "20231026", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json index ce0ba4a162b..fe55da5c674 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v2beta1.json @@ -7695,7 +7695,7 @@ } } }, - "revision": "20231019", + "revision": "20231026", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3.json b/googleapiclient/discovery_cache/documents/dialogflow.v3.json index f593dda8b63..821c44ab99b 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v3.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v3.json @@ -4126,7 +4126,7 @@ } } }, - "revision": "20231019", + "revision": "20231026", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { diff --git a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json index 1ccef8db8e1..e4f11ca8680 100644 --- a/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json +++ b/googleapiclient/discovery_cache/documents/dialogflow.v3beta1.json @@ -4126,7 +4126,7 @@ } } }, - "revision": "20231019", + "revision": "20231026", "rootUrl": "https://dialogflow.googleapis.com/", "schemas": { "GoogleCloudDialogflowCxV3AdvancedSettings": { diff --git 
a/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json b/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json index 348be6dafd1..a3cc479ac57 100644 --- a/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json +++ b/googleapiclient/discovery_cache/documents/digitalassetlinks.v1.json @@ -199,7 +199,7 @@ } } }, - "revision": "20231014", + "revision": "20231028", "rootUrl": "https://digitalassetlinks.googleapis.com/", "schemas": { "AndroidAppAsset": { diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json index 958fc7bc17e..df9ffdc7f84 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json @@ -2892,7 +2892,7 @@ } } }, - "revision": "20231016", + "revision": "20231019", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json index 9749f91bd65..d9157f5c3bd 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json @@ -2658,7 +2658,7 @@ } } }, - "revision": "20231016", + "revision": "20231019", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v1.json b/googleapiclient/discovery_cache/documents/displayvideo.v1.json index 4e954aa6e7b..42112c29fde 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v1.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v1.json @@ -8146,7 +8146,7 @@ } } }, - "revision": "20231020", + "revision": "20231026", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActivateManualTriggerRequest": { @@ -10885,7 +10885,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -10899,6 +10900,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -10913,7 +10915,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } @@ -17706,7 +17709,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -17720,6 +17724,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -17734,7 +17739,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." 
], "type": "string" } @@ -17780,7 +17786,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -17794,6 +17801,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -17808,7 +17816,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v2.json b/googleapiclient/discovery_cache/documents/displayvideo.v2.json index 43408ba549c..e8196500115 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v2.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v2.json @@ -9172,7 +9172,7 @@ } } }, - "revision": "20231020", + "revision": "20231026", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActivateManualTriggerRequest": { @@ -12183,7 +12183,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -12197,6 +12198,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -12211,7 +12213,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } @@ -19481,7 +19484,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -19495,6 +19499,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -19509,7 +19514,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } @@ -19555,7 +19561,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -19569,6 +19576,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -19583,7 +19591,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." 
], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v3.json b/googleapiclient/discovery_cache/documents/displayvideo.v3.json index 2350feb57bb..68d5771b4f3 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v3.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v3.json @@ -9127,7 +9127,7 @@ } } }, - "revision": "20231020", + "revision": "20231026", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActiveViewVideoViewabilityMetricConfig": { @@ -9258,7 +9258,7 @@ "AD_GROUP_FORMAT_BUMPER", "AD_GROUP_FORMAT_NON_SKIPPABLE_IN_STREAM", "AD_GROUP_FORMAT_AUDIO", - "AD_GROUP_FORMAT_ACTION", + "AD_GROUP_FORMAT_RESPONSIVE", "AD_GROUP_FORMAT_REACH", "AD_GROUP_FORMAT_MASTHEAD" ], @@ -9269,7 +9269,7 @@ "Bumper ads.", "Non-skippable in-stream ads.", "Non-skippable in-stream audio ads.", - "[Responsive ads for video action campaigns] (https://support.google.com/displayvideo/answer/9065351).", + "Responsive ads.", "[Effective reach ad groups] (https://support.google.com/displayvideo/answer/9173684), including in-stream and bumper ads.", "Masthead Ad that is surfaced on the top slot on the YouTube homepage." ], @@ -12742,7 +12742,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -12756,6 +12757,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -12770,7 +12772,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } @@ -20179,7 +20182,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -20193,6 +20197,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -20207,7 +20212,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } @@ -20253,7 +20259,8 @@ "SDF_VERSION_5_3", "SDF_VERSION_5_4", "SDF_VERSION_5_5", - "SDF_VERSION_6" + "SDF_VERSION_6", + "SDF_VERSION_7" ], "enumDeprecated": [ false, @@ -20267,6 +20274,7 @@ true, false, false, + false, false ], "enumDescriptions": [ @@ -20281,7 +20289,8 @@ "SDF version 5.3", "SDF version 5.4", "SDF version 5.5", - "SDF version 6" + "SDF version 6", + "SDF version 7 Read the [v7 migration guide](/display-video/api/structured-data-file/v7-migration-guide) before migrating to this version. Currently in beta. Only available for use by a subset of users." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json index 621b73f3e66..981ea3472d9 100644 --- a/googleapiclient/discovery_cache/documents/dlp.v2.json +++ b/googleapiclient/discovery_cache/documents/dlp.v2.json @@ -290,18 +290,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. 
Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`.", "location": "query", "type": "string" }, @@ -446,18 +446,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`.", "location": "query", "type": "string" }, @@ -604,18 +604,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. 
Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`.", "location": "query", "type": "string" }, @@ -668,7 +668,7 @@ "discoveryConfigs": { "methods": { "create": { - "description": "Creates a config for Discovery to scan and profile storage.", + "description": "Creates a config for discovery to scan and profile storage.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/discoveryConfigs", "httpMethod": "POST", "id": "dlp.organizations.locations.discoveryConfigs.create", @@ -696,7 +696,7 @@ ] }, "delete": { - "description": "Deletes a Discovery configuration.", + "description": "Deletes a discovery configuration.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/discoveryConfigs/{discoveryConfigsId}", "httpMethod": "DELETE", "id": "dlp.organizations.locations.discoveryConfigs.delete", @@ -721,7 +721,7 @@ ] }, "get": { - "description": "Gets a Discovery configuration.", + "description": "Gets a discovery configuration.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/discoveryConfigs/{discoveryConfigsId}", "httpMethod": "GET", "id": "dlp.organizations.locations.discoveryConfigs.get", @@ -746,7 +746,7 @@ ] }, "list": { - "description": "Lists Discovery configurations.", + "description": "Lists discovery configurations.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/discoveryConfigs", "httpMethod": "GET", "id": "dlp.organizations.locations.discoveryConfigs.list", @@ -755,18 +755,18 @@ ], "parameters": { "orderBy": { - "description": "Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.", + "description": "Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. 
The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by a server.", + "description": "Size of the page. This value can be limited by a server.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.", + "description": "Page token to continue retrieval. Comes from the previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.", "location": "query", "type": "string" }, @@ -787,7 +787,7 @@ ] }, "patch": { - "description": "Updates a Discovery configuration.", + "description": "Updates a discovery configuration.", "flatPath": "v2/organizations/{organizationsId}/locations/{locationsId}/discoveryConfigs/{discoveryConfigsId}", "httpMethod": "PATCH", "id": "dlp.organizations.locations.discoveryConfigs.patch", @@ -838,7 +838,7 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`", "location": "query", "type": "string" }, @@ -981,18 +981,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. 
- `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`.", "location": "query", "type": "string" }, @@ -1142,18 +1142,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.", + "description": "Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by a server.", + "description": "Size of the page. This value can be limited by a server.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.", + "description": "Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.", "location": "query", "type": "string" }, @@ -1313,18 +1313,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. 
Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`.", "location": "query", "type": "string" }, @@ -1471,18 +1471,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`.", "location": "query", "type": "string" }, @@ -1719,18 +1719,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. 
Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`.", "location": "query", "type": "string" }, @@ -1908,7 +1908,7 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`", "location": "query", "type": "string" }, @@ -2083,18 +2083,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. 
If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`.", "location": "query", "type": "string" }, @@ -2272,18 +2272,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.", + "description": "Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by a server.", + "description": "Size of the page. This value can be limited by a server.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.", + "description": "Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.", "location": "query", "type": "string" }, @@ -2533,18 +2533,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. 
- `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`.", "location": "query", "type": "string" }, @@ -2597,7 +2597,7 @@ "discoveryConfigs": { "methods": { "create": { - "description": "Creates a config for Discovery to scan and profile storage.", + "description": "Creates a config for discovery to scan and profile storage.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/discoveryConfigs", "httpMethod": "POST", "id": "dlp.projects.locations.discoveryConfigs.create", @@ -2625,7 +2625,7 @@ ] }, "delete": { - "description": "Deletes a Discovery configuration.", + "description": "Deletes a discovery configuration.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/discoveryConfigs/{discoveryConfigsId}", "httpMethod": "DELETE", "id": "dlp.projects.locations.discoveryConfigs.delete", @@ -2650,7 +2650,7 @@ ] }, "get": { - "description": "Gets a Discovery configuration.", + "description": "Gets a discovery configuration.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/discoveryConfigs/{discoveryConfigsId}", "httpMethod": "GET", "id": "dlp.projects.locations.discoveryConfigs.get", @@ -2675,7 +2675,7 @@ ] }, "list": { - "description": "Lists Discovery configurations.", + "description": "Lists discovery configurations.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/discoveryConfigs", "httpMethod": "GET", "id": "dlp.projects.locations.discoveryConfigs.list", @@ -2684,18 +2684,18 @@ ], "parameters": { "orderBy": { - "description": "Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.", + "description": "Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by a server.", + "description": "Size of the page. This value can be limited by a server.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.", + "description": "Page token to continue retrieval. 
Comes from the previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.", "location": "query", "type": "string" }, @@ -2716,7 +2716,7 @@ ] }, "patch": { - "description": "Updates a Discovery configuration.", + "description": "Updates a discovery configuration.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/discoveryConfigs/{discoveryConfigsId}", "httpMethod": "PATCH", "id": "dlp.projects.locations.discoveryConfigs.patch", @@ -2929,7 +2929,7 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`", "location": "query", "type": "string" }, @@ -3104,18 +3104,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`.", "location": "query", "type": "string" }, @@ -3321,18 +3321,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. 
This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.", + "description": "Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by a server.", + "description": "Size of the page. This value can be limited by a server.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.", + "description": "Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.", "location": "query", "type": "string" }, @@ -3492,18 +3492,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`.", + "description": "Page token to continue retrieval. 
Comes from the previous call to `ListStoredInfoTypes`.", "location": "query", "type": "string" }, @@ -3650,18 +3650,18 @@ "type": "string" }, "orderBy": { - "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", + "description": "Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name.", "location": "query", "type": "string" }, "pageSize": { - "description": "Size of the page, can be limited by the server. If zero server returns a page of max size 100.", + "description": "Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { - "description": "Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`.", + "description": "Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`.", "location": "query", "type": "string" }, @@ -3714,7 +3714,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GooglePrivacyDlpV2Action": { @@ -3853,7 +3853,7 @@ "type": "object" }, "GooglePrivacyDlpV2BigQueryDiscoveryTarget": { - "description": "Target used to match against for Discovery with BigQuery tables", + "description": "Target used to match against for discovery with BigQuery tables", "id": "GooglePrivacyDlpV2BigQueryDiscoveryTarget", "properties": { "cadence": { @@ -3870,7 +3870,7 @@ }, "filter": { "$ref": "GooglePrivacyDlpV2DiscoveryBigQueryFilter", - "description": "Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table." + "description": "Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table." } }, "type": "object" @@ -4025,11 +4025,11 @@ "type": "object" }, "GooglePrivacyDlpV2BigQueryTableTypes": { - "description": "The types of bigquery tables supported by Cloud DLP.", + "description": "The types of BigQuery tables supported by Cloud DLP.", "id": "GooglePrivacyDlpV2BigQueryTableTypes", "properties": { "types": { - "description": "A set of bigquery table types.", + "description": "A set of BigQuery table types.", "items": { "enum": [ "BIG_QUERY_TABLE_TYPE_UNSPECIFIED", @@ -4757,7 +4757,7 @@ "id": "GooglePrivacyDlpV2CreateDiscoveryConfigRequest", "properties": { "configId": { - "description": "The config id can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\\d-_]+`. The maximum length is 100 characters. 
Can be empty to allow the system to generate one.", + "description": "The config ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.", "type": "string" }, "discoveryConfig": { @@ -5069,7 +5069,7 @@ "type": "array" }, "inspectTemplates": { - "description": "Detection logic for profile generation. Not all template features are used by profiles. FindingLimits, include_quote and exclude_info_types have no impact on data profiling. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including \"global\"). Each region is scanned using the applicable template. If no region-specific template is specified, but a \"global\" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.", + "description": "Detection logic for profile generation. Not all template features are used by profiles. FindingLimits, include_quote and exclude_info_types have no impact on data profiling. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including \"global\"). Each region is scanned using the applicable template. If no region-specific template is specified, but a \"global\" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.", "items": { "type": "string" }, @@ -5573,13 +5573,13 @@ "type": "object" }, "GooglePrivacyDlpV2Disabled": { - "description": "Do nothing.", + "description": "Do not profile the tables.", "id": "GooglePrivacyDlpV2Disabled", "properties": {}, "type": "object" }, "GooglePrivacyDlpV2DiscoveryBigQueryConditions": { - "description": "Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes.", + "description": "Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age).", "id": "GooglePrivacyDlpV2DiscoveryBigQueryConditions", "properties": { "createdAfter": { @@ -5592,7 +5592,7 @@ "description": "At least one of the conditions must be true for a table to be scanned." }, "typeCollection": { - "description": "Restrict Discovery to categories of table types.", + "description": "Restrict discovery to categories of table types.", "enum": [ "BIG_QUERY_COLLECTION_UNSPECIFIED", "BIG_QUERY_COLLECTION_ALL_TYPES", @@ -5600,20 +5600,20 @@ ], "enumDescriptions": [ "Unused.", - "Automatically generate profiles for all tables, even if the table type is not yet fully supported for analysis. These unsupported profiles will be generated with errors to indicate their partial support. 
When support is added, they will automatically be profiled during the next scheduled run.", - "Only those types fully supported will be profiled. Will expand automatically as new support is added. Unsupported table types will not have a profile generated." + "Automatically generate profiles for all tables, even if the table type is not yet fully supported for analysis. Profiles for unsupported tables will be generated with errors to indicate their partial support. When full support is added, the tables will automatically be profiled during the next scheduled run.", + "Only those types fully supported will be profiled. Will expand automatically as Cloud DLP adds support for new table types. Unsupported table types will not have partial profiles generated." ], "type": "string" }, "types": { "$ref": "GooglePrivacyDlpV2BigQueryTableTypes", - "description": "Restrict Discovery to specific table types." + "description": "Restrict discovery to specific table types." } }, "type": "object" }, "GooglePrivacyDlpV2DiscoveryBigQueryFilter": { - "description": "Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age).", + "description": "Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID.", "id": "GooglePrivacyDlpV2DiscoveryBigQueryFilter", "properties": { "otherTables": { @@ -5628,7 +5628,7 @@ "type": "object" }, "GooglePrivacyDlpV2DiscoveryConfig": { - "description": "Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).", + "description": "Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).", "id": "GooglePrivacyDlpV2DiscoveryConfig", "properties": { "actions": { @@ -5657,7 +5657,7 @@ "type": "array" }, "inspectTemplates": { - "description": "Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including \"global\"). Each region is scanned using the applicable template. If no region-specific template is specified, but a \"global\" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.", + "description": "Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. 
At most one template must be specified per-region (including \"global\"). Each region is scanned using the applicable template. If no region-specific template is specified, but a \"global\" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.", "items": { "type": "string" }, @@ -5686,8 +5686,8 @@ ], "enumDescriptions": [ "Unused", - "The Discovery config is currently active.", - "The Discovery config is paused temporarily." + "The discovery config is currently active.", + "The discovery config is paused temporarily." ], "type": "string" }, @@ -5763,7 +5763,7 @@ "type": "object" }, "GooglePrivacyDlpV2DiscoveryStartingLocation": { - "description": "The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization.", + "description": "The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization.", "id": "GooglePrivacyDlpV2DiscoveryStartingLocation", "properties": { "folderId": { @@ -7516,7 +7516,7 @@ "type": "array" }, "nextPageToken": { - "description": "If the next page is available then the next page token to be used in following ListDeidentifyTemplates request.", + "description": "If the next page is available then the next page token to be used in the following ListDeidentifyTemplates request.", "type": "string" } }, @@ -7534,7 +7534,7 @@ "type": "array" }, "nextPageToken": { - "description": "If the next page is available then the next page token to be used in following ListDiscoveryConfigs request.", + "description": "If the next page is available then the next page token to be used in the following ListDiscoveryConfigs request.", "type": "string" } }, @@ -7584,7 +7584,7 @@ "type": "array" }, "nextPageToken": { - "description": "If the next page is available then the next page token to be used in following ListInspectTemplates request.", + "description": "If the next page is available then the next page token to be used in the following ListInspectTemplates request.", "type": "string" } }, @@ -7602,7 +7602,7 @@ "type": "array" }, "nextPageToken": { - "description": "If the next page is available then the next page token to be used in following ListJobTriggers request.", + "description": "If the next page is available then the next page token to be used in the following ListJobTriggers request.", "type": "string" } }, @@ -7613,7 +7613,7 @@ "id": "GooglePrivacyDlpV2ListStoredInfoTypesResponse", "properties": { "nextPageToken": { - "description": "If the next page is available then the next page token to be used in following ListStoredInfoTypes request.", + "description": "If the next page is available then the next page token to be used in the following ListStoredInfoTypes request.", "type": "string" }, "storedInfoTypes": { @@ -9417,7 +9417,7 @@ "properties": { "discoveryConfig": { "$ref": "GooglePrivacyDlpV2DiscoveryConfig", - "description": "New DiscoveryConfig value." + "description": "Required. New DiscoveryConfig value." 
}, "updateMask": { "description": "Mask to control which fields get updated.", diff --git a/googleapiclient/discovery_cache/documents/dns.v1.json b/googleapiclient/discovery_cache/documents/dns.v1.json index 3e2fa8b7a6f..3a06560079d 100644 --- a/googleapiclient/discovery_cache/documents/dns.v1.json +++ b/googleapiclient/discovery_cache/documents/dns.v1.json @@ -1824,7 +1824,7 @@ } } }, - "revision": "20230923", + "revision": "20231020", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { diff --git a/googleapiclient/discovery_cache/documents/dns.v1beta2.json b/googleapiclient/discovery_cache/documents/dns.v1beta2.json index 7e994fc39bb..6f580ee82f4 100644 --- a/googleapiclient/discovery_cache/documents/dns.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/dns.v1beta2.json @@ -1821,7 +1821,7 @@ } } }, - "revision": "20230923", + "revision": "20231020", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { diff --git a/googleapiclient/discovery_cache/documents/docs.v1.json b/googleapiclient/discovery_cache/documents/docs.v1.json index c33ee444613..4df74e6e6fe 100644 --- a/googleapiclient/discovery_cache/documents/docs.v1.json +++ b/googleapiclient/discovery_cache/documents/docs.v1.json @@ -216,7 +216,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://docs.googleapis.com/", "schemas": { "AutoText": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1.json b/googleapiclient/discovery_cache/documents/documentai.v1.json index dc77d5b032c..e79e1270391 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1.json @@ -1042,7 +1042,7 @@ } } }, - "revision": "20231012", + "revision": "20231021", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json index 5c8fc377df5..a83be0c0d67 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta2.json @@ -292,7 +292,7 @@ } } }, - "revision": "20231012", + "revision": "20231021", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": { diff --git a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json index cb02ca0ba7d..a9fb83dd185 100644 --- a/googleapiclient/discovery_cache/documents/documentai.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/documentai.v1beta3.json @@ -1284,7 +1284,7 @@ } } }, - "revision": "20231012", + "revision": "20231021", "rootUrl": "https://documentai.googleapis.com/", "schemas": { "GoogleCloudDocumentaiUiv1beta3AutoLabelDocumentsMetadata": { diff --git a/googleapiclient/discovery_cache/documents/domains.v1.json b/googleapiclient/discovery_cache/documents/domains.v1.json index 6393b367897..ce93d6a7eec 100644 --- a/googleapiclient/discovery_cache/documents/domains.v1.json +++ b/googleapiclient/discovery_cache/documents/domains.v1.json @@ -843,7 +843,7 @@ } } }, - "revision": "20230925", + "revision": "20231018", "rootUrl": "https://domains.googleapis.com/", "schemas": { "AuditConfig": { @@ -1834,7 +1834,8 @@ "IMPORT_PENDING", "ACTIVE", "SUSPENDED", - "EXPORTED" + "EXPORTED", + "EXPIRED" ], "enumDescriptions": [ "The 
state is undefined.", @@ -1845,7 +1846,8 @@ "The domain is being imported from Google Domains to Cloud Domains.", "The domain is registered and operational. The domain renews automatically as long as it remains in this state.", "The domain is suspended and inoperative. For more details, see the `issues` field.", - "The domain is no longer managed with Cloud Domains. It may have been transferred to another registrar or exported for management in [Google Domains](https://domains.google/). You can no longer update it with this API, and information shown about it may be stale. Domains in this state are not automatically renewed by Cloud Domains." + "The domain is no longer managed with Cloud Domains. It may have been transferred to another registrar or exported for management in [Google Domains](https://domains.google/). You can no longer update it with this API, and information shown about it may be stale. Domains in this state are not automatically renewed by Cloud Domains.", + "The domain is expired." ], "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/domains.v1alpha2.json b/googleapiclient/discovery_cache/documents/domains.v1alpha2.json index 6f656894ac5..139278ddfe2 100644 --- a/googleapiclient/discovery_cache/documents/domains.v1alpha2.json +++ b/googleapiclient/discovery_cache/documents/domains.v1alpha2.json @@ -843,7 +843,7 @@ } } }, - "revision": "20230925", + "revision": "20231018", "rootUrl": "https://domains.googleapis.com/", "schemas": { "AuditConfig": { @@ -1834,7 +1834,8 @@ "IMPORT_PENDING", "ACTIVE", "SUSPENDED", - "EXPORTED" + "EXPORTED", + "EXPIRED" ], "enumDescriptions": [ "The state is undefined.", @@ -1845,7 +1846,8 @@ "The domain is being imported from Google Domains to Cloud Domains.", "The domain is registered and operational. The domain renews automatically as long as it remains in this state.", "The domain is suspended and inoperative. For more details, see the `issues` field.", - "The domain is no longer managed with Cloud Domains. It may have been transferred to another registrar or exported for management in [Google Domains](https://domains.google/). You can no longer update it with this API, and information shown about it may be stale. Domains in this state are not automatically renewed by Cloud Domains." + "The domain is no longer managed with Cloud Domains. It may have been transferred to another registrar or exported for management in [Google Domains](https://domains.google/). You can no longer update it with this API, and information shown about it may be stale. Domains in this state are not automatically renewed by Cloud Domains.", + "The domain is expired." ], "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/domains.v1beta1.json b/googleapiclient/discovery_cache/documents/domains.v1beta1.json index 72d2ad55943..39fef61044e 100644 --- a/googleapiclient/discovery_cache/documents/domains.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/domains.v1beta1.json @@ -843,7 +843,7 @@ } } }, - "revision": "20230925", + "revision": "20231018", "rootUrl": "https://domains.googleapis.com/", "schemas": { "AuditConfig": { @@ -1834,7 +1834,8 @@ "IMPORT_PENDING", "ACTIVE", "SUSPENDED", - "EXPORTED" + "EXPORTED", + "EXPIRED" ], "enumDescriptions": [ "The state is undefined.", @@ -1845,7 +1846,8 @@ "The domain is being imported from Google Domains to Cloud Domains.", "The domain is registered and operational. 
The domain renews automatically as long as it remains in this state.", "The domain is suspended and inoperative. For more details, see the `issues` field.", - "The domain is no longer managed with Cloud Domains. It may have been transferred to another registrar or exported for management in [Google Domains](https://domains.google/). You can no longer update it with this API, and information shown about it may be stale. Domains in this state are not automatically renewed by Cloud Domains." + "The domain is no longer managed with Cloud Domains. It may have been transferred to another registrar or exported for management in [Google Domains](https://domains.google/). You can no longer update it with this API, and information shown about it may be stale. Domains in this state are not automatically renewed by Cloud Domains.", + "The domain is expired." ], "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json index ba959122c1c..4946db4eeff 100644 --- a/googleapiclient/discovery_cache/documents/domainsrdap.v1.json +++ b/googleapiclient/discovery_cache/documents/domainsrdap.v1.json @@ -289,7 +289,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://domainsrdap.googleapis.com/", "schemas": { "HttpBody": { diff --git a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v2.json b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v2.json index 67b2a11a629..869cb02f779 100644 --- a/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v2.json +++ b/googleapiclient/discovery_cache/documents/doubleclickbidmanager.v2.json @@ -319,7 +319,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://doubleclickbidmanager.googleapis.com/", "schemas": { "ChannelGrouping": { diff --git a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json index fd999b512c6..a7ca060bb2a 100644 --- a/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json +++ b/googleapiclient/discovery_cache/documents/doubleclicksearch.v2.json @@ -543,7 +543,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://doubleclicksearch.googleapis.com/", "schemas": { "Availability": { diff --git a/googleapiclient/discovery_cache/documents/drive.v2.json b/googleapiclient/discovery_cache/documents/drive.v2.json index 283b8f7abc9..f3939baae31 100644 --- a/googleapiclient/discovery_cache/documents/drive.v2.json +++ b/googleapiclient/discovery_cache/documents/drive.v2.json @@ -3842,7 +3842,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/drive.v3.json b/googleapiclient/discovery_cache/documents/drive.v3.json index e382b754fdd..5d67f6af7da 100644 --- a/googleapiclient/discovery_cache/documents/drive.v3.json +++ b/googleapiclient/discovery_cache/documents/drive.v3.json @@ -2433,7 +2433,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/driveactivity.v2.json b/googleapiclient/discovery_cache/documents/driveactivity.v2.json index 8de764beb1b..c05c3c21eeb 100644 --- a/googleapiclient/discovery_cache/documents/driveactivity.v2.json +++ 
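As a hedged usage note rather than part of the patch, the `EXPIRED` state newly added to domains v1, v1alpha2, and v1beta1 above can be handled by callers of `registrations.get`; the registration name below is hypothetical:

```python
# Minimal sketch: reading a registration's state and reacting to the new
# EXPIRED enum value added in this revision.
from googleapiclient.discovery import build

domains = build("domains", "v1")
name = "projects/my-project/locations/global/registrations/example-com"  # hypothetical

registration = domains.projects().locations().registrations().get(name=name).execute()
state = registration.get("state")
if state == "EXPIRED":
    print("Domain registration has expired:", registration.get("domainName"))
else:
    print(registration.get("domainName"), "is in state", state)
```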
b/googleapiclient/discovery_cache/documents/driveactivity.v2.json @@ -132,7 +132,7 @@ } } }, - "revision": "20231021", + "revision": "20231029", "rootUrl": "https://driveactivity.googleapis.com/", "schemas": { "Action": { diff --git a/googleapiclient/discovery_cache/documents/drivelabels.v2.json b/googleapiclient/discovery_cache/documents/drivelabels.v2.json index 9d4eba35171..c2a93750405 100644 --- a/googleapiclient/discovery_cache/documents/drivelabels.v2.json +++ b/googleapiclient/discovery_cache/documents/drivelabels.v2.json @@ -1032,7 +1032,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://drivelabels.googleapis.com/", "schemas": { "GoogleAppsDriveLabelsV2BadgeColors": { diff --git a/googleapiclient/discovery_cache/documents/drivelabels.v2beta.json b/googleapiclient/discovery_cache/documents/drivelabels.v2beta.json index d9582b3bc60..e95233d0b7f 100644 --- a/googleapiclient/discovery_cache/documents/drivelabels.v2beta.json +++ b/googleapiclient/discovery_cache/documents/drivelabels.v2beta.json @@ -1032,7 +1032,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://drivelabels.googleapis.com/", "schemas": { "GoogleAppsDriveLabelsV2betaBadgeColors": { diff --git a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json index 30dee019a76..7e8262fc0fb 100644 --- a/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json +++ b/googleapiclient/discovery_cache/documents/essentialcontacts.v1.json @@ -850,7 +850,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://essentialcontacts.googleapis.com/", "schemas": { "GoogleCloudEssentialcontactsV1ComputeContactsResponse": { diff --git a/googleapiclient/discovery_cache/documents/eventarc.v1.json b/googleapiclient/discovery_cache/documents/eventarc.v1.json index b52e4b0cdc7..07b2641dbee 100644 --- a/googleapiclient/discovery_cache/documents/eventarc.v1.json +++ b/googleapiclient/discovery_cache/documents/eventarc.v1.json @@ -1197,7 +1197,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://eventarc.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json b/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json index 55ba968eaa3..717be739ff2 100644 --- a/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/eventarc.v1beta1.json @@ -584,7 +584,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://eventarc.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json index 78b922982e7..747eb32566e 100644 --- a/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/factchecktools.v1alpha1.json @@ -304,7 +304,7 @@ } } }, - "revision": "20231015", + "revision": "20231029", "rootUrl": "https://factchecktools.googleapis.com/", "schemas": { "GoogleFactcheckingFactchecktoolsV1alpha1Claim": { diff --git a/googleapiclient/discovery_cache/documents/fcm.v1.json b/googleapiclient/discovery_cache/documents/fcm.v1.json index 7cea7802ffc..b7de2874dc5 100644 --- a/googleapiclient/discovery_cache/documents/fcm.v1.json +++ b/googleapiclient/discovery_cache/documents/fcm.v1.json @@ -146,7 
+146,7 @@ } } }, - "revision": "20231020", + "revision": "20231027", "rootUrl": "https://fcm.googleapis.com/", "schemas": { "AndroidConfig": { diff --git a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json index 50bd4e48421..e46b34fe967 100644 --- a/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/fcmdata.v1beta1.json @@ -154,7 +154,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://fcmdata.googleapis.com/", "schemas": { "GoogleFirebaseFcmDataV1beta1AndroidDeliveryData": { diff --git a/googleapiclient/discovery_cache/documents/file.v1.json b/googleapiclient/discovery_cache/documents/file.v1.json index 174bf4dada6..30432bae7f7 100644 --- a/googleapiclient/discovery_cache/documents/file.v1.json +++ b/googleapiclient/discovery_cache/documents/file.v1.json @@ -874,7 +874,7 @@ } } }, - "revision": "20231008", + "revision": "20231019", "rootUrl": "https://file.googleapis.com/", "schemas": { "Backup": { @@ -942,7 +942,8 @@ "BASIC_SSD", "HIGH_SCALE_SSD", "ENTERPRISE", - "ZONAL" + "ZONAL", + "REGIONAL" ], "enumDescriptions": [ "Not set.", @@ -952,7 +953,8 @@ "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_SSD is an alias for PREMIUM Tier, and offers improved performance backed by SSD.", "HIGH_SCALE instances offer expanded capacity and performance scaling capabilities.", "ENTERPRISE instances offer the features and availability needed for mission-critical workloads.", - "ZONAL instances offer expanded capacity and performance scaling capabilities." + "ZONAL instances offer expanded capacity and performance scaling capabilities.", + "REGIONAL instances offer the features and availability needed for mission-critical workloads." ], "readOnly": true, "type": "string" @@ -1473,7 +1475,8 @@ "BASIC_SSD", "HIGH_SCALE_SSD", "ENTERPRISE", - "ZONAL" + "ZONAL", + "REGIONAL" ], "enumDescriptions": [ "Not set.", @@ -1483,7 +1486,8 @@ "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_SSD is an alias for PREMIUM Tier, and offers improved performance backed by SSD.", "HIGH_SCALE instances offer expanded capacity and performance scaling capabilities.", "ENTERPRISE instances offer the features and availability needed for mission-critical workloads.", - "ZONAL instances offer expanded capacity and performance scaling capabilities." + "ZONAL instances offer expanded capacity and performance scaling capabilities.", + "REGIONAL instances offer the features and availability needed for mission-critical workloads." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/file.v1beta1.json b/googleapiclient/discovery_cache/documents/file.v1beta1.json index 370c77ae739..c0853df1f49 100644 --- a/googleapiclient/discovery_cache/documents/file.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/file.v1beta1.json @@ -1041,7 +1041,7 @@ } } }, - "revision": "20231008", + "revision": "20231019", "rootUrl": "https://file.googleapis.com/", "schemas": { "Backup": { @@ -1109,7 +1109,8 @@ "BASIC_SSD", "HIGH_SCALE_SSD", "ENTERPRISE", - "ZONAL" + "ZONAL", + "REGIONAL" ], "enumDescriptions": [ "Not set.", @@ -1119,7 +1120,8 @@ "BASIC instances offer a maximum capacity of 63.9 TB. 
BASIC_SSD is an alias for PREMIUM Tier, and offers improved performance backed by SSD.", "HIGH_SCALE instances offer expanded capacity and performance scaling capabilities.", "ENTERPRISE instances offer the features and availability needed for mission-critical workloads.", - "ZONAL instances offer expanded capacity and performance scaling capabilities." + "ZONAL instances offer expanded capacity and performance scaling capabilities.", + "REGIONAL instances offer the features and availability needed for mission-critical workloads." ], "readOnly": true, "type": "string" @@ -1695,7 +1697,8 @@ "BASIC_SSD", "HIGH_SCALE_SSD", "ENTERPRISE", - "ZONAL" + "ZONAL", + "REGIONAL" ], "enumDescriptions": [ "Not set.", @@ -1705,7 +1708,8 @@ "BASIC instances offer a maximum capacity of 63.9 TB. BASIC_SSD is an alias for PREMIUM Tier, and offers improved performance backed by SSD.", "HIGH_SCALE instances offer expanded capacity and performance scaling capabilities.", "ENTERPRISE instances offer the features and availability needed for mission-critical workloads.", - "ZONAL instances offer expanded capacity and performance scaling capabilities." + "ZONAL instances offer expanded capacity and performance scaling capabilities.", + "REGIONAL instances offer the features and availability needed for mission-critical workloads." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json index aa65c1b8db8..32d2d99c77d 100644 --- a/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json +++ b/googleapiclient/discovery_cache/documents/firebaseappdistribution.v1.json @@ -941,7 +941,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://firebaseappdistribution.googleapis.com/", "schemas": { "GdataBlobstore2Info": { diff --git a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json index 51cbd644e43..e042550b48c 100644 --- a/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebasedatabase.v1beta.json @@ -351,7 +351,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://firebasedatabase.googleapis.com/", "schemas": { "DatabaseInstance": { diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json index d5dfa12688c..148ee506cce 100644 --- a/googleapiclient/discovery_cache/documents/firebasehosting.v1.json +++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1.json @@ -269,7 +269,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://firebasehosting.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json index b6e7f45f89a..7f37711042c 100644 --- a/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firebasehosting.v1beta1.json @@ -2422,7 +2422,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://firebasehosting.googleapis.com/", "schemas": { "ActingUser": { diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1.json 
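Not part of the diff, but for context: the `REGIONAL` tier added to the Filestore v1 and v1beta1 enums above would typically be requested at instance creation. The body fields other than `tier` (`fileShares`, `networks`) are stated here as assumptions, since they are not shown in this hunk:

```python
# Minimal sketch: requesting the newly added REGIONAL tier when creating a
# Filestore instance. Field names outside "tier" are assumptions.
from googleapiclient.discovery import build

filestore = build("file", "v1")

operation = filestore.projects().locations().instances().create(
    parent="projects/my-project/locations/us-central1",  # hypothetical
    instanceId="shared-regional",
    body={
        "tier": "REGIONAL",  # enum value introduced in this revision
        "fileShares": [{"name": "share1", "capacityGb": 1024}],
        "networks": [{"network": "default"}],
    },
).execute()
print(operation.get("name"))  # long-running operation name
```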
b/googleapiclient/discovery_cache/documents/firebaseml.v1.json index eb8dfa8c7dc..cb08e4bb991 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v1.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v1.json @@ -204,7 +204,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json index 148e36f2a1e..89624351528 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v1beta2.json @@ -318,7 +318,7 @@ } } }, - "revision": "20231018", + "revision": "20231025", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "DownloadModelResponse": { diff --git a/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json b/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json index 275977b6b44..f16f477cb60 100644 --- a/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebasestorage.v1beta.json @@ -238,7 +238,7 @@ } } }, - "revision": "20231006", + "revision": "20231013", "rootUrl": "https://firebasestorage.googleapis.com/", "schemas": { "AddFirebaseRequest": { diff --git a/googleapiclient/discovery_cache/documents/firestore.v1.json b/googleapiclient/discovery_cache/documents/firestore.v1.json index c44f229567a..125ccf9abf6 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1.json @@ -1672,7 +1672,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "Aggregation": { diff --git a/googleapiclient/discovery_cache/documents/firestore.v1beta1.json b/googleapiclient/discovery_cache/documents/firestore.v1beta1.json index 72439f767c2..b5175f64656 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1beta1.json @@ -950,7 +950,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "Aggregation": { diff --git a/googleapiclient/discovery_cache/documents/firestore.v1beta2.json b/googleapiclient/discovery_cache/documents/firestore.v1beta2.json index b2ded35d132..f193582db72 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1beta2.json @@ -415,7 +415,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/fitness.v1.json b/googleapiclient/discovery_cache/documents/fitness.v1.json index 127f42867da..0ee34137350 100644 --- a/googleapiclient/discovery_cache/documents/fitness.v1.json +++ b/googleapiclient/discovery_cache/documents/fitness.v1.json @@ -831,7 +831,7 @@ } } }, - "revision": "20231017", + "revision": "20231025", "rootUrl": "https://fitness.googleapis.com/", "schemas": { "AggregateBucket": { diff --git a/googleapiclient/discovery_cache/documents/forms.v1.json b/googleapiclient/discovery_cache/documents/forms.v1.json index b1894332972..906d46fa238 100644 --- a/googleapiclient/discovery_cache/documents/forms.v1.json +++ 
b/googleapiclient/discovery_cache/documents/forms.v1.json @@ -423,7 +423,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://forms.googleapis.com/", "schemas": { "Answer": { diff --git a/googleapiclient/discovery_cache/documents/games.v1.json b/googleapiclient/discovery_cache/documents/games.v1.json index 4f777e1ea5d..6ca35f9f055 100644 --- a/googleapiclient/discovery_cache/documents/games.v1.json +++ b/googleapiclient/discovery_cache/documents/games.v1.json @@ -1354,7 +1354,7 @@ } } }, - "revision": "20231017", + "revision": "20231025", "rootUrl": "https://games.googleapis.com/", "schemas": { "AchievementDefinition": { diff --git a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json index c1ff3089d2a..6b3aa04bf5a 100644 --- a/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json +++ b/googleapiclient/discovery_cache/documents/gamesConfiguration.v1configuration.json @@ -381,7 +381,7 @@ } } }, - "revision": "20231017", + "revision": "20231025", "rootUrl": "https://gamesconfiguration.googleapis.com/", "schemas": { "AchievementConfiguration": { diff --git a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json index 0d44504bfbe..8eba752fbf4 100644 --- a/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json +++ b/googleapiclient/discovery_cache/documents/gamesManagement.v1management.json @@ -471,7 +471,7 @@ } } }, - "revision": "20231017", + "revision": "20231025", "rootUrl": "https://gamesmanagement.googleapis.com/", "schemas": { "AchievementResetAllResponse": { diff --git a/googleapiclient/discovery_cache/documents/gkebackup.v1.json b/googleapiclient/discovery_cache/documents/gkebackup.v1.json index 5b2625e05bc..938814dd1d3 100644 --- a/googleapiclient/discovery_cache/documents/gkebackup.v1.json +++ b/googleapiclient/discovery_cache/documents/gkebackup.v1.json @@ -1688,7 +1688,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://gkebackup.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json index b8fdac5d8d5..10b97cea35b 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json @@ -1834,7 +1834,7 @@ } } }, - "revision": "20231013", + "revision": "20231022", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json index 90b11806632..24f20e8087a 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json @@ -2414,7 +2414,7 @@ } } }, - "revision": "20231013", + "revision": "20231022", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AnthosObservabilityFeatureSpec": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json index e0f2247ba95..160f6df68da 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha2.json @@ -657,7 +657,7 @@ } } }, - "revision": 
"20231013", + "revision": "20231022", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "ApplianceCluster": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json index d771fa71bda..7191886f214 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json @@ -2340,7 +2340,7 @@ } } }, - "revision": "20231013", + "revision": "20231022", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AnthosObservabilityFeatureSpec": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json index 4160181994a..d657f8dc97d 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json @@ -712,7 +712,7 @@ } } }, - "revision": "20231013", + "revision": "20231022", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "ApplianceCluster": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json index 106ae485d3f..cce144465a9 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json @@ -280,7 +280,7 @@ } } }, - "revision": "20231013", + "revision": "20231022", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/gkeonprem.v1.json b/googleapiclient/discovery_cache/documents/gkeonprem.v1.json index f6d4f9af9b6..59e47b3b6e2 100644 --- a/googleapiclient/discovery_cache/documents/gkeonprem.v1.json +++ b/googleapiclient/discovery_cache/documents/gkeonprem.v1.json @@ -2996,7 +2996,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://gkeonprem.googleapis.com/", "schemas": { "Authorization": { diff --git a/googleapiclient/discovery_cache/documents/gmail.v1.json b/googleapiclient/discovery_cache/documents/gmail.v1.json index c01d5a0243a..cc583a2d789 100644 --- a/googleapiclient/discovery_cache/documents/gmail.v1.json +++ b/googleapiclient/discovery_cache/documents/gmail.v1.json @@ -3077,7 +3077,7 @@ } } }, - "revision": "20231009", + "revision": "20231023", "rootUrl": "https://gmail.googleapis.com/", "schemas": { "AutoForwarding": { @@ -3233,6 +3233,10 @@ "description": "Metadata for a private key instance.", "id": "CsePrivateKeyMetadata", "properties": { + "hardwareKeyMetadata": { + "$ref": "HardwareKeyMetadata", + "description": "Metadata for hardware keys." + }, "kaclsKeyMetadata": { "$ref": "KaclsKeyMetadata", "description": "Metadata for a private key instance managed by an external key access control list service." @@ -3429,6 +3433,17 @@ }, "type": "object" }, + "HardwareKeyMetadata": { + "description": "Metadata for hardware keys.", + "id": "HardwareKeyMetadata", + "properties": { + "description": { + "description": "Description about the hardware key.", + "type": "string" + } + }, + "type": "object" + }, "History": { "description": "A record of a change to the user's mailbox. 
Each history change may affect multiple messages in multiple ways.", "id": "History", diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json index 6d641bfbb32..175e6671677 100644 --- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json +++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1.json @@ -265,7 +265,7 @@ } } }, - "revision": "20231022", + "revision": "20231026", "rootUrl": "https://gmailpostmastertools.googleapis.com/", "schemas": { "DeliveryError": { diff --git a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json index 9c7ad397caa..ff19d4f5c3e 100644 --- a/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gmailpostmastertools.v1beta1.json @@ -265,7 +265,7 @@ } } }, - "revision": "20231022", + "revision": "20231026", "rootUrl": "https://gmailpostmastertools.googleapis.com/", "schemas": { "DeliveryError": { diff --git a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json index 28c352162e4..a358c339350 100644 --- a/googleapiclient/discovery_cache/documents/groupsmigration.v1.json +++ b/googleapiclient/discovery_cache/documents/groupsmigration.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231005", + "revision": "20231012", "rootUrl": "https://groupsmigration.googleapis.com/", "schemas": { "Groups": { diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1.json b/googleapiclient/discovery_cache/documents/healthcare.v1.json index 552cc69d0ce..d6f7e593667 100644 --- a/googleapiclient/discovery_cache/documents/healthcare.v1.json +++ b/googleapiclient/discovery_cache/documents/healthcare.v1.json @@ -4431,7 +4431,7 @@ } } }, - "revision": "20231023", + "revision": "20231031", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "ActivateConsentRequest": { diff --git a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json index 5ddf0547b57..7d837bfda9b 100644 --- a/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/healthcare.v1beta1.json @@ -5361,7 +5361,7 @@ } } }, - "revision": "20231023", + "revision": "20231031", "rootUrl": "https://healthcare.googleapis.com/", "schemas": { "AccessDeterminationLogConfig": { diff --git a/googleapiclient/discovery_cache/documents/homegraph.v1.json b/googleapiclient/discovery_cache/documents/homegraph.v1.json index 62b0d9cb93e..ee500d41e66 100644 --- a/googleapiclient/discovery_cache/documents/homegraph.v1.json +++ b/googleapiclient/discovery_cache/documents/homegraph.v1.json @@ -216,7 +216,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://homegraph.googleapis.com/", "schemas": { "AgentDeviceId": { diff --git a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json index e27ad7fc1b8..5d4f9b280cb 100644 --- a/googleapiclient/discovery_cache/documents/iamcredentials.v1.json +++ b/googleapiclient/discovery_cache/documents/iamcredentials.v1.json @@ -226,7 +226,7 @@ } } }, - "revision": "20231006", + "revision": "20231020", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { 
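As an illustrative sketch only (not part of the patch), the new `hardwareKeyMetadata` field on `CsePrivateKeyMetadata` surfaces when listing client-side encryption keypairs; the `keypairs().list()` call and any response field names other than `hardwareKeyMetadata`/`description` are assumptions:

```python
# Minimal sketch: surfacing the new hardwareKeyMetadata field on Gmail CSE
# private key metadata. Response shapes beyond the schema shown above are assumed.
from googleapiclient.discovery import build

gmail = build("gmail", "v1")  # requires appropriate CSE scopes in practice

keypairs = gmail.users().settings().cse().keypairs().list(userId="me").execute()
for keypair in keypairs.get("cseKeyPairs", []):
    for metadata in keypair.get("privateKeyMetadata", []):
        hardware = metadata.get("hardwareKeyMetadata")
        if hardware:
            print("Hardware-backed key:", hardware.get("description"))
```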
"GenerateAccessTokenRequest": { diff --git a/googleapiclient/discovery_cache/documents/iap.v1beta1.json b/googleapiclient/discovery_cache/documents/iap.v1beta1.json index 33bc9cc5e48..46f21c4a8aa 100644 --- a/googleapiclient/discovery_cache/documents/iap.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/iap.v1beta1.json @@ -194,7 +194,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://iap.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json b/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json index e4c57ba3f31..1b8f46c325f 100644 --- a/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json +++ b/googleapiclient/discovery_cache/documents/identitytoolkit.v1.json @@ -1239,7 +1239,7 @@ } } }, - "revision": "20231016", + "revision": "20231027", "rootUrl": "https://identitytoolkit.googleapis.com/", "schemas": { "GoogleCloudIdentitytoolkitV1Argon2Parameters": { @@ -1477,7 +1477,7 @@ "type": "string" }, "signinMethods": { - "description": "The list of sign-in methods that the user has previously used. Each element is one of `password`, `emailLink`, or the provider ID of an IdP. Present only when a registered email identifier is set in the request.", + "description": "The list of sign-in methods that the user has previously used. Each element is one of `password`, `emailLink`, or the provider ID of an IdP. Present only when a registered email identifier is set in the request. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, this method returns an empty list.", "items": { "type": "string" }, @@ -2557,7 +2557,7 @@ "type": "string" }, "email": { - "description": "The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production.", + "description": "The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator.", "type": "string" }, "emailVerified": { diff --git a/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json b/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json index d6ce1c1ee35..c0e3bb17278 100644 --- a/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json +++ b/googleapiclient/discovery_cache/documents/identitytoolkit.v2.json @@ -289,32 +289,6 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, - "getPasskeyConfig": { - "description": "Retrieve a passkey configuration for an Identity Toolkit project.", - "flatPath": "v2/projects/{projectsId}/passkeyConfig", - "httpMethod": "GET", - "id": "identitytoolkit.projects.getPasskeyConfig", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The resource name of the config, for example: 'projects/my-awesome-project/passkeyConfig'.", - "location": "path", - "pattern": "^projects/[^/]+/passkeyConfig$", - "required": true, - "type": "string" - } - }, - "path": "v2/{+name}", - "response": { - "$ref": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] - }, "updateConfig": { "description": "Update an Identity Toolkit project configuration.", "flatPath": "v2/projects/{projectsId}/config", @@ -349,41 +323,6 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/firebase" ] - }, - "updatePasskeyConfig": { - "description": "Update a passkey configuration for an Identity Toolkit project.", - "flatPath": "v2/projects/{projectsId}/passkeyConfig", - "httpMethod": "PATCH", - "id": "identitytoolkit.projects.updatePasskeyConfig", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The name of the PasskeyConfig resource.", - "location": "path", - "pattern": "^projects/[^/]+/passkeyConfig$", - "required": true, - "type": "string" - }, - "updateMask": { - "description": "Optional. The update mask applies to the resource. Empty update mask will result in updating nothing. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", - "format": "google-fieldmask", - "location": "query", - "type": "string" - } - }, - "path": "v2/{+name}", - "request": { - "$ref": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig" - }, - "response": { - "$ref": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] } }, "resources": { @@ -1017,32 +956,6 @@ "https://www.googleapis.com/auth/firebase" ] }, - "getPasskeyConfig": { - "description": "Retrieve a passkey configuration for an Identity Toolkit project.", - "flatPath": "v2/projects/{projectsId}/tenants/{tenantsId}/passkeyConfig", - "httpMethod": "GET", - "id": "identitytoolkit.projects.tenants.getPasskeyConfig", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. The resource name of the config, for example: 'projects/my-awesome-project/passkeyConfig'.", - "location": "path", - "pattern": "^projects/[^/]+/tenants/[^/]+/passkeyConfig$", - "required": true, - "type": "string" - } - }, - "path": "v2/{+name}", - "response": { - "$ref": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] - }, "list": { "description": "List tenants under the given agent project. Requires read permission on the Agent project.", "flatPath": "v2/projects/{projectsId}/tenants", @@ -1172,41 +1085,6 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/firebase" ] - }, - "updatePasskeyConfig": { - "description": "Update a passkey configuration for an Identity Toolkit project.", - "flatPath": "v2/projects/{projectsId}/tenants/{tenantsId}/passkeyConfig", - "httpMethod": "PATCH", - "id": "identitytoolkit.projects.tenants.updatePasskeyConfig", - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "description": "Required. 
The name of the PasskeyConfig resource.", - "location": "path", - "pattern": "^projects/[^/]+/tenants/[^/]+/passkeyConfig$", - "required": true, - "type": "string" - }, - "updateMask": { - "description": "Optional. The update mask applies to the resource. Empty update mask will result in updating nothing. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask", - "format": "google-fieldmask", - "location": "query", - "type": "string" - } - }, - "path": "v2/{+name}", - "request": { - "$ref": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig" - }, - "response": { - "$ref": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/firebase" - ] } }, "resources": { @@ -1777,7 +1655,7 @@ } } }, - "revision": "20231016", + "revision": "20231027", "rootUrl": "https://identitytoolkit.googleapis.com/", "schemas": { "GoogleCloudIdentitytoolkitAdminV2AllowByDefault": { @@ -2606,28 +2484,6 @@ }, "type": "object" }, - "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig": { - "description": "Configuration for signing in users using passkeys.", - "id": "GoogleCloudIdentitytoolkitAdminV2PasskeyConfig", - "properties": { - "expectedOrigins": { - "description": "Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys.", - "items": { - "type": "string" - }, - "type": "array" - }, - "name": { - "description": "Required. The name of the PasskeyConfig resource.", - "type": "string" - }, - "rpId": { - "description": "Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created.", - "type": "string" - } - }, - "type": "object" - }, "GoogleCloudIdentitytoolkitAdminV2PasswordPolicyConfig": { "description": "The configuration for the password policy on the project.", "id": "GoogleCloudIdentitytoolkitAdminV2PasswordPolicyConfig", diff --git a/googleapiclient/discovery_cache/documents/indexing.v3.json b/googleapiclient/discovery_cache/documents/indexing.v3.json index 657925f5d9c..efbe1933dd5 100644 --- a/googleapiclient/discovery_cache/documents/indexing.v3.json +++ b/googleapiclient/discovery_cache/documents/indexing.v3.json @@ -149,7 +149,7 @@ } } }, - "revision": "20231010", + "revision": "20231024", "rootUrl": "https://indexing.googleapis.com/", "schemas": { "PublishUrlNotificationResponse": { diff --git a/googleapiclient/discovery_cache/documents/jobs.v3.json b/googleapiclient/discovery_cache/documents/jobs.v3.json index ec4796a4982..fe64da6c5c3 100644 --- a/googleapiclient/discovery_cache/documents/jobs.v3.json +++ b/googleapiclient/discovery_cache/documents/jobs.v3.json @@ -652,7 +652,7 @@ } } }, - "revision": "20230925", + "revision": "20231018", "rootUrl": "https://jobs.googleapis.com/", "schemas": { "ApplicationInfo": { diff --git a/googleapiclient/discovery_cache/documents/jobs.v4.json b/googleapiclient/discovery_cache/documents/jobs.v4.json index 94742e9e4eb..ea8cf1a5eb7 100644 --- a/googleapiclient/discovery_cache/documents/jobs.v4.json +++ b/googleapiclient/discovery_cache/documents/jobs.v4.json @@ -903,7 +903,7 @@ } } }, - "revision": "20230925", + "revision": "20231018", "rootUrl": "https://jobs.googleapis.com/", "schemas": { "ApplicationInfo": { diff --git a/googleapiclient/discovery_cache/documents/keep.v1.json b/googleapiclient/discovery_cache/documents/keep.v1.json index 
d5f3384735d..2af70fc7ae0 100644 --- a/googleapiclient/discovery_cache/documents/keep.v1.json +++ b/googleapiclient/discovery_cache/documents/keep.v1.json @@ -314,7 +314,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://keep.googleapis.com/", "schemas": { "Attachment": { diff --git a/googleapiclient/discovery_cache/documents/kmsinventory.v1.json b/googleapiclient/discovery_cache/documents/kmsinventory.v1.json index ce16a34be54..7366dd2e6d1 100644 --- a/googleapiclient/discovery_cache/documents/kmsinventory.v1.json +++ b/googleapiclient/discovery_cache/documents/kmsinventory.v1.json @@ -242,7 +242,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://kmsinventory.googleapis.com/", "schemas": { "GoogleCloudKmsInventoryV1ListCryptoKeysResponse": { diff --git a/googleapiclient/discovery_cache/documents/language.v1.json b/googleapiclient/discovery_cache/documents/language.v1.json index 9a590d6d0e7..bc06fc33db1 100644 --- a/googleapiclient/discovery_cache/documents/language.v1.json +++ b/googleapiclient/discovery_cache/documents/language.v1.json @@ -246,7 +246,7 @@ } } }, - "revision": "20231014", + "revision": "20231021", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { diff --git a/googleapiclient/discovery_cache/documents/language.v1beta2.json b/googleapiclient/discovery_cache/documents/language.v1beta2.json index a24414ae9cb..c8811b9788d 100644 --- a/googleapiclient/discovery_cache/documents/language.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/language.v1beta2.json @@ -246,7 +246,7 @@ } } }, - "revision": "20231014", + "revision": "20231021", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { diff --git a/googleapiclient/discovery_cache/documents/language.v2.json b/googleapiclient/discovery_cache/documents/language.v2.json index 9ecfcd50bcb..83b25cc2be8 100644 --- a/googleapiclient/discovery_cache/documents/language.v2.json +++ b/googleapiclient/discovery_cache/documents/language.v2.json @@ -208,7 +208,7 @@ } } }, - "revision": "20231014", + "revision": "20231021", "rootUrl": "https://language.googleapis.com/", "schemas": { "AnalyzeEntitiesRequest": { diff --git a/googleapiclient/discovery_cache/documents/libraryagent.v1.json b/googleapiclient/discovery_cache/documents/libraryagent.v1.json index a94796145dd..832f2b7c644 100644 --- a/googleapiclient/discovery_cache/documents/libraryagent.v1.json +++ b/googleapiclient/discovery_cache/documents/libraryagent.v1.json @@ -279,7 +279,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://libraryagent.googleapis.com/", "schemas": { "GoogleExampleLibraryagentV1Book": { diff --git a/googleapiclient/discovery_cache/documents/licensing.v1.json b/googleapiclient/discovery_cache/documents/licensing.v1.json index 3aae0b98e76..5bf55d8025c 100644 --- a/googleapiclient/discovery_cache/documents/licensing.v1.json +++ b/googleapiclient/discovery_cache/documents/licensing.v1.json @@ -400,7 +400,7 @@ } } }, - "revision": "20231021", + "revision": "20231023", "rootUrl": "https://licensing.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json b/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json index e2927b70d0b..30e56a8281b 100644 --- a/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json +++ b/googleapiclient/discovery_cache/documents/lifesciences.v2beta.json @@ -312,7 +312,7 @@ } } }, 
- "revision": "20231017", + "revision": "20231020", "rootUrl": "https://lifesciences.googleapis.com/", "schemas": { "Accelerator": { diff --git a/googleapiclient/discovery_cache/documents/localservices.v1.json b/googleapiclient/discovery_cache/documents/localservices.v1.json index 77233b93609..425bcd8f096 100644 --- a/googleapiclient/discovery_cache/documents/localservices.v1.json +++ b/googleapiclient/discovery_cache/documents/localservices.v1.json @@ -250,7 +250,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://localservices.googleapis.com/", "schemas": { "GoogleAdsHomeservicesLocalservicesV1AccountReport": { diff --git a/googleapiclient/discovery_cache/documents/memcache.v1.json b/googleapiclient/discovery_cache/documents/memcache.v1.json index 85c04410c8f..f38eadfe2bb 100644 --- a/googleapiclient/discovery_cache/documents/memcache.v1.json +++ b/googleapiclient/discovery_cache/documents/memcache.v1.json @@ -584,7 +584,7 @@ } } }, - "revision": "20230907", + "revision": "20231012", "rootUrl": "https://memcache.googleapis.com/", "schemas": { "ApplyParametersRequest": { diff --git a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json index 9bf66bfbac1..65ea039944b 100644 --- a/googleapiclient/discovery_cache/documents/memcache.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/memcache.v1beta2.json @@ -612,7 +612,7 @@ } } }, - "revision": "20230907", + "revision": "20231012", "rootUrl": "https://memcache.googleapis.com/", "schemas": { "ApplyParametersRequest": { diff --git a/googleapiclient/discovery_cache/documents/metastore.v1.json b/googleapiclient/discovery_cache/documents/metastore.v1.json index fec7d807f23..fff3cd68181 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1.json @@ -1339,7 +1339,7 @@ } } }, - "revision": "20231010", + "revision": "20231023", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AlterMetadataResourceLocationRequest": { @@ -1569,6 +1569,17 @@ }, "type": "object" }, + "DataCatalogConfig": { + "description": "Specifies how metastore metadata should be integrated with the Data Catalog service.", + "id": "DataCatalogConfig", + "properties": { + "enabled": { + "description": "Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.", + "type": "boolean" + } + }, + "type": "object" + }, "DatabaseDump": { "description": "A specification of the location of and metadata about a database dump from a relational database management system.", "id": "DatabaseDump", @@ -2187,6 +2198,17 @@ }, "type": "object" }, + "MetadataIntegration": { + "description": "Specifies how metastore metadata should be integrated with external services.", + "id": "MetadataIntegration", + "properties": { + "dataCatalogConfig": { + "$ref": "DataCatalogConfig", + "description": "Optional. The integration config for the Data Catalog service." + } + }, + "type": "object" + }, "MetadataManagementActivity": { "description": "The metadata management activities of the metastore service.", "id": "MetadataManagementActivity", @@ -2583,6 +2605,10 @@ "$ref": "MaintenanceWindow", "description": "The one hour maintenance window of the metastore service. This specifies when the service can be restarted for maintenance purposes in UTC time. 
Maintenance window is not needed for services with the SPANNER database type." }, + "metadataIntegration": { + "$ref": "MetadataIntegration", + "description": "Optional. The setting that defines how metastore metadata should be integrated with external services and systems." + }, "metadataManagementActivity": { "$ref": "MetadataManagementActivity", "description": "Output only. The metadata management activities of the metastore service.", diff --git a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json index c1261d8f5f5..ce758eb075d 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json @@ -1579,7 +1579,7 @@ } } }, - "revision": "20231010", + "revision": "20231023", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AlterMetadataResourceLocationRequest": { @@ -1816,7 +1816,7 @@ "id": "DataCatalogConfig", "properties": { "enabled": { - "description": "Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.", + "description": "Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.", "type": "boolean" } }, @@ -2471,7 +2471,7 @@ "properties": { "dataCatalogConfig": { "$ref": "DataCatalogConfig", - "description": "The integration config for the Data Catalog service." + "description": "Optional. The integration config for the Data Catalog service." }, "dataplexConfig": { "$ref": "DataplexConfig", @@ -2904,7 +2904,7 @@ }, "metadataIntegration": { "$ref": "MetadataIntegration", - "description": "The setting that defines how metastore metadata should be integrated with external services and systems." + "description": "Optional. The setting that defines how metastore metadata should be integrated with external services and systems." }, "metadataManagementActivity": { "$ref": "MetadataManagementActivity", diff --git a/googleapiclient/discovery_cache/documents/metastore.v1beta.json b/googleapiclient/discovery_cache/documents/metastore.v1beta.json index 0027c1d8169..ca33f2da645 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1beta.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1beta.json @@ -1579,7 +1579,7 @@ } } }, - "revision": "20231010", + "revision": "20231023", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AlterMetadataResourceLocationRequest": { @@ -1816,7 +1816,7 @@ "id": "DataCatalogConfig", "properties": { "enabled": { - "description": "Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.", + "description": "Optional. Defines whether the metastore metadata should be synced to Data Catalog. The default value is to disable syncing metastore metadata to Data Catalog.", "type": "boolean" } }, @@ -2471,7 +2471,7 @@ "properties": { "dataCatalogConfig": { "$ref": "DataCatalogConfig", - "description": "The integration config for the Data Catalog service." + "description": "Optional. The integration config for the Data Catalog service." 
}, "dataplexConfig": { "$ref": "DataplexConfig", @@ -2904,7 +2904,7 @@ }, "metadataIntegration": { "$ref": "MetadataIntegration", - "description": "The setting that defines how metastore metadata should be integrated with external services and systems." + "description": "Optional. The setting that defines how metastore metadata should be integrated with external services and systems." }, "metadataManagementActivity": { "$ref": "MetadataManagementActivity", diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1.json index ecdcd68f7d1..a560368cd6c 100644 --- a/googleapiclient/discovery_cache/documents/migrationcenter.v1.json +++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1.json @@ -2099,7 +2099,7 @@ } } }, - "revision": "20231013", + "revision": "20231024", "rootUrl": "https://migrationcenter.googleapis.com/", "schemas": { "AddAssetsToGroupRequest": { diff --git a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json index 508533558a7..cef605e4788 100644 --- a/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/migrationcenter.v1alpha1.json @@ -2102,7 +2102,7 @@ } } }, - "revision": "20231013", + "revision": "20231024", "rootUrl": "https://migrationcenter.googleapis.com/", "schemas": { "AddAssetsToGroupRequest": { diff --git a/googleapiclient/discovery_cache/documents/monitoring.v1.json b/googleapiclient/discovery_cache/documents/monitoring.v1.json index fe29eb5cc4d..fbfa20e9887 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v1.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v1.json @@ -753,7 +753,7 @@ } } }, - "revision": "20231016", + "revision": "20231023", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { diff --git a/googleapiclient/discovery_cache/documents/monitoring.v3.json b/googleapiclient/discovery_cache/documents/monitoring.v3.json index ef72f6d4d41..d82e72de8e1 100644 --- a/googleapiclient/discovery_cache/documents/monitoring.v3.json +++ b/googleapiclient/discovery_cache/documents/monitoring.v3.json @@ -1788,7 +1788,7 @@ "timeSeries": { "methods": { "create": { - "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.", + "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response. This method does not support resource locations constraint of an organization policy (https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).", "flatPath": "v3/projects/{projectsId}/timeSeries", "httpMethod": "POST", "id": "monitoring.projects.timeSeries.create", @@ -2714,7 +2714,7 @@ } } }, - "revision": "20231016", + "revision": "20231023", "rootUrl": "https://monitoring.googleapis.com/", "schemas": { "Aggregation": { @@ -3667,7 +3667,7 @@ "id": "ForecastOptions", "properties": { "forecastHorizon": { - "description": "Required. The length of time into the future to forecast whether a time series will violate the threshold. 
If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing.", + "description": "Required. The length of time into the future to forecast whether a time series will violate the threshold. If the predicted value is found to violate the threshold, and the violation is observed in all forecasts made for the configured duration, then the time series is considered to be failing. The forecast horizon can range from 1 hour to 60 hours.", "format": "google-duration", "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json index 16ea16836b9..922e3aeb742 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessaccountmanagement.v1.json @@ -530,7 +530,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinessaccountmanagement.googleapis.com/", "schemas": { "AcceptInvitationRequest": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json index 41d39498f3d..1de582a3948 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessbusinessinformation.v1.json @@ -612,7 +612,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinessbusinessinformation.googleapis.com/", "schemas": { "AdWordsLocationExtensions": { diff --git a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json index 71812d777e9..a44b3b69b6c 100644 --- a/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinesslodging.v1.json @@ -194,7 +194,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinesslodging.googleapis.com/", "schemas": { "Accessibility": { @@ -787,7 +787,8 @@ "type": "object" }, "EcoCertification": { - "description": "An eco certificate awarded to the hotel.", + "deprecated": true, + "description": "An eco certificate awarded to the hotel. Deprecated: this message is no longer populated. All certification data is now provided by BeCause.", "id": "EcoCertification", "properties": { "awarded": { @@ -982,7 +983,7 @@ "type": "string" }, "greenBuildingDesign": { - "description": "Output only. Green building design. True if BREEAM-* or LEED-* certified.", + "description": "Output only. Green building design. True if the property has been awarded a relevant certification.", "readOnly": true, "type": "boolean" }, @@ -4935,7 +4936,8 @@ }, "sustainabilityCertifications": { "$ref": "SustainabilityCertifications", - "description": "Sustainability certifications the hotel has been awarded." + "deprecated": true, + "description": "Sustainability certifications the hotel has been awarded. Deprecated: this field is no longer populated. All certification data is now provided by BeCause." 
}, "sustainableSourcing": { "$ref": "SustainableSourcing", @@ -4953,7 +4955,8 @@ "type": "object" }, "SustainabilityCertifications": { - "description": "Sustainability certifications the hotel has been awarded.", + "deprecated": true, + "description": "Sustainability certifications the hotel has been awarded. Deprecated: this message is no longer populated. All certification data is now provided by BeCause.", "id": "SustainabilityCertifications", "properties": { "breeamCertification": { @@ -5002,8 +5005,7 @@ "type": "array" }, "leedCertification": { - "deprecated": true, - "description": "LEED certification. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC.", + "description": "LEED certification.", "enum": [ "LEED_CERTIFICATION_UNSPECIFIED", "NO_LEED_CERTIFICATION", @@ -5023,8 +5025,7 @@ "type": "string" }, "leedCertificationException": { - "deprecated": true, - "description": "LEED certification exception. Deprecated: this field is no longer populated. LEED certification status is now provided directly by USGBC.", + "description": "LEED certification exception.", "enum": [ "EXCEPTION_UNSPECIFIED", "UNDER_CONSTRUCTION", diff --git a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json index 47ca8292940..664ac24c801 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessnotifications.v1.json @@ -154,7 +154,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinessnotifications.googleapis.com/", "schemas": { "NotificationSetting": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json index 6d96a51cca2..05b1c540b05 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessplaceactions.v1.json @@ -281,7 +281,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinessplaceactions.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json b/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json index 51e2e3a3d0d..69fc2882d26 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessqanda.v1.json @@ -323,7 +323,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinessqanda.googleapis.com/", "schemas": { "Answer": { diff --git a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json index b5818147f7c..1675fc8ca78 100644 --- a/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json +++ b/googleapiclient/discovery_cache/documents/mybusinessverifications.v1.json @@ -237,7 +237,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://mybusinessverifications.googleapis.com/", "schemas": { "AddressVerificationData": { diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json index 445f827ec71..fbc1231ae73 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json +++ 
b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json @@ -591,7 +591,7 @@ } } }, - "revision": "20230927", + "revision": "20231018", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json index 8bc85bab4a9..df83da409d6 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json @@ -591,7 +591,7 @@ } } }, - "revision": "20230927", + "revision": "20231018", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { diff --git a/googleapiclient/discovery_cache/documents/networksecurity.v1.json b/googleapiclient/discovery_cache/documents/networksecurity.v1.json index 01534e83675..257d5499bf0 100644 --- a/googleapiclient/discovery_cache/documents/networksecurity.v1.json +++ b/googleapiclient/discovery_cache/documents/networksecurity.v1.json @@ -2474,7 +2474,7 @@ } } }, - "revision": "20231003", + "revision": "20231017", "rootUrl": "https://networksecurity.googleapis.com/", "schemas": { "AddAddressGroupItemsRequest": { diff --git a/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json b/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json index b02b3bd9e15..893865eab0a 100644 --- a/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json @@ -3162,7 +3162,7 @@ } } }, - "revision": "20231003", + "revision": "20231017", "rootUrl": "https://networksecurity.googleapis.com/", "schemas": { "AddAddressGroupItemsRequest": { diff --git a/googleapiclient/discovery_cache/documents/networkservices.v1.json b/googleapiclient/discovery_cache/documents/networkservices.v1.json index f379dd2055b..5109361989f 100644 --- a/googleapiclient/discovery_cache/documents/networkservices.v1.json +++ b/googleapiclient/discovery_cache/documents/networkservices.v1.json @@ -2148,7 +2148,7 @@ } } }, - "revision": "20230927", + "revision": "20231018", "rootUrl": "https://networkservices.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json b/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json index 10e72e2ffca..a751fd21511 100644 --- a/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networkservices.v1beta1.json @@ -2483,7 +2483,7 @@ } } }, - "revision": "20230927", + "revision": "20231018", "rootUrl": "https://networkservices.googleapis.com/", "schemas": { "AuditConfig": { @@ -3685,7 +3685,7 @@ "type": "array" }, "forwardingRules": { - "description": "Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule.", + "description": "Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LbRouteExtension` resource per forwarding rule.", "items": { "type": "string" }, @@ -3699,7 +3699,7 @@ "type": "object" }, "loadBalancingScheme": { - "description": "Required. 
All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`.", + "description": "Required. All backend services and forwarding rules referenced by this extension must share the same load balancing scheme. Supported values: `INTERNAL_MANAGED`, `EXTERNAL_MANAGED`. For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service).", "enum": [ "LOAD_BALANCING_SCHEME_UNSPECIFIED", "INTERNAL_MANAGED", @@ -3747,7 +3747,7 @@ "type": "array" }, "forwardingRules": { - "description": "Required. A list of references to the forwarding rules to which this service extension is attach to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule.", + "description": "Required. A list of references to the forwarding rules to which this service extension is attached to. At least one forwarding rule is required. There can be only one `LBTrafficExtension` resource per forwarding rule.", "items": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/notebooks.v1.json b/googleapiclient/discovery_cache/documents/notebooks.v1.json index 5ad58301a1f..8fc5983d46a 100644 --- a/googleapiclient/discovery_cache/documents/notebooks.v1.json +++ b/googleapiclient/discovery_cache/documents/notebooks.v1.json @@ -2008,7 +2008,7 @@ } } }, - "revision": "20231003", + "revision": "20231019", "rootUrl": "https://notebooks.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2707,7 +2707,7 @@ "additionalProperties": { "type": "string" }, - "description": "Custom metadata to apply to this instance.", + "description": "Custom metadata to apply to this instance. For example, to specify a Cloud Storage bucket for automatic backup, you can use the `gcs-data-bucket` metadata tag. Format: `\"--metadata=gcs-data-bucket=``BUCKET''\"`.", "type": "object" }, "migrated": { diff --git a/googleapiclient/discovery_cache/documents/notebooks.v2.json b/googleapiclient/discovery_cache/documents/notebooks.v2.json index 0e2a2c55c80..c48fff138c2 100644 --- a/googleapiclient/discovery_cache/documents/notebooks.v2.json +++ b/googleapiclient/discovery_cache/documents/notebooks.v2.json @@ -325,6 +325,31 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "getConfig": { + "description": "Gets general backend configurations that might also affect the frontend. Location is required by CCFE. Although we could bypass it to send location- less request directly to the backend job, we would need CPE (go/cloud-cpe). Having the location might also be useful depending on the query.", + "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances:getConfig", + "httpMethod": "GET", + "id": "notebooks.projects.locations.instances.getConfig", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. Format: `projects/{project_id}/locations/{location}`", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}/instances:getConfig", + "response": { + "$ref": "Config" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, "getIamPolicy": { "description": "Gets the access control policy for a resource. 
Returns an empty policy if the resource exists and does not have a policy set.", "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:getIamPolicy", @@ -823,7 +848,7 @@ } } }, - "revision": "20231003", + "revision": "20231019", "rootUrl": "https://notebooks.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -967,6 +992,31 @@ }, "type": "object" }, + "Config": { + "description": "Response for getting WbI configurations in a location", + "id": "Config", + "properties": { + "availableImages": { + "description": "Output only. The list of available images to create a WbI.", + "items": { + "$ref": "ImageRelease" + }, + "readOnly": true, + "type": "array" + }, + "defaultValues": { + "$ref": "DefaultValues", + "description": "Output only. The default values for configuration.", + "readOnly": true + }, + "supportedValues": { + "$ref": "SupportedValues", + "description": "Output only. The supported values for configuration.", + "readOnly": true + } + }, + "type": "object" + }, "ContainerImage": { "description": "Definition of a container image for starting a notebook instance with the environment installed in a container.", "id": "ContainerImage", @@ -1030,6 +1080,18 @@ }, "type": "object" }, + "DefaultValues": { + "description": "DefaultValues represents the default configuration values.", + "id": "DefaultValues", + "properties": { + "machineType": { + "description": "Output only. The default machine type used by the backend if not provided by the user.", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "DiagnoseInstanceRequest": { "description": "Request for creating a notebook instance diagnostic file.", "id": "DiagnoseInstanceRequest", @@ -1237,6 +1299,23 @@ }, "type": "object" }, + "ImageRelease": { + "description": "ConfigImage represents an image release available to create a WbI", + "id": "ImageRelease", + "properties": { + "imageName": { + "description": "Output only. The name of the image of the form workbench-instances-vYYYYmmdd--", + "readOnly": true, + "type": "string" + }, + "releaseName": { + "description": "Output only. The release of the image of the form m123", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, "Instance": { "description": "The definition of a notebook instance.", "id": "Instance", @@ -1713,6 +1792,29 @@ "properties": {}, "type": "object" }, + "SupportedValues": { + "description": "SupportedValues represents the values supported by the configuration.", + "id": "SupportedValues", + "properties": { + "acceleratorTypes": { + "description": "Output only. The accelerator types supported by WbI.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + }, + "machineTypes": { + "description": "Output only. 
The machine types supported by WbI.", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, "TestIamPermissionsRequest": { "description": "Request message for `TestIamPermissions` method.", "id": "TestIamPermissionsRequest", diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json index 46b2093b4a6..f8b8de7929a 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json @@ -339,7 +339,7 @@ } } }, - "revision": "20231016", + "revision": "20231023", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json index d875565aea8..0716de9680a 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json @@ -339,7 +339,7 @@ } } }, - "revision": "20231016", + "revision": "20231023", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { diff --git a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json index d55837612d7..f60f9c522b3 100644 --- a/googleapiclient/discovery_cache/documents/orgpolicy.v2.json +++ b/googleapiclient/discovery_cache/documents/orgpolicy.v2.json @@ -915,7 +915,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://orgpolicy.googleapis.com/", "schemas": { "GoogleCloudOrgpolicyV2AlternatePolicySpec": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1.json b/googleapiclient/discovery_cache/documents/oslogin.v1.json index 06169ad2857..a365226adc2 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1.json @@ -343,7 +343,7 @@ } } }, - "revision": "20231008", + "revision": "20231022", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json index 8d3ce6d8155..cb9fd901efd 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1alpha.json @@ -471,7 +471,7 @@ } } }, - "revision": "20231008", + "revision": "20231022", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json index b34506470f0..2369b5c6d2c 100644 --- a/googleapiclient/discovery_cache/documents/oslogin.v1beta.json +++ b/googleapiclient/discovery_cache/documents/oslogin.v1beta.json @@ -441,7 +441,7 @@ } } }, - "revision": "20231008", + "revision": "20231022", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json index d4dec1af6d2..bc30f6337bf 100644 --- a/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json +++ b/googleapiclient/discovery_cache/documents/pagespeedonline.v5.json @@ -193,7 +193,7 @@ } } }, - "revision": "20231020", + 
"revision": "20231026", "rootUrl": "https://pagespeedonline.googleapis.com/", "schemas": { "AuditRefs": { diff --git a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json index d8d10920a7b..76b90870862 100644 --- a/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json +++ b/googleapiclient/discovery_cache/documents/paymentsresellersubscription.v1.json @@ -396,7 +396,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://paymentsresellersubscription.googleapis.com/", "schemas": { "GoogleCloudPaymentsResellerSubscriptionV1Amount": { diff --git a/googleapiclient/discovery_cache/documents/people.v1.json b/googleapiclient/discovery_cache/documents/people.v1.json index 6e062beca32..7ec675698a2 100644 --- a/googleapiclient/discovery_cache/documents/people.v1.json +++ b/googleapiclient/discovery_cache/documents/people.v1.json @@ -1172,7 +1172,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://people.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/places.v1.json b/googleapiclient/discovery_cache/documents/places.v1.json index 627b2381af9..d51c99e5e38 100644 --- a/googleapiclient/discovery_cache/documents/places.v1.json +++ b/googleapiclient/discovery_cache/documents/places.v1.json @@ -8,6 +8,12 @@ "https://www.googleapis.com/auth/maps-platform.places": { "description": "Private Service: https://www.googleapis.com/auth/maps-platform.places" }, + "https://www.googleapis.com/auth/maps-platform.places.details": { + "description": "Private Service: https://www.googleapis.com/auth/maps-platform.places.details" + }, + "https://www.googleapis.com/auth/maps-platform.places.nearbysearch": { + "description": "Private Service: https://www.googleapis.com/auth/maps-platform.places.nearbysearch" + }, "https://www.googleapis.com/auth/maps-platform.places.textsearch": { "description": "Private Service: https://www.googleapis.com/auth/maps-platform.places.textsearch" } @@ -113,6 +119,63 @@ "resources": { "places": { "methods": { + "get": { + "description": "Get place details with a place id (in a name) string.", + "flatPath": "v1/places/{placesId}", + "httpMethod": "GET", + "id": "places.places.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "languageCode": { + "description": "Optional. Place details will be displayed with the preferred language if available. Current list of supported languages: https://developers.google.com/maps/faq#languagesupport.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. A place ID returned in a Place (with \"places/\" prefix), or equivalently the name in the same Place. Format: places/*place_id*.", + "location": "path", + "pattern": "^places/[^/]+$", + "required": true, + "type": "string" + }, + "regionCode": { + "description": "Optional. The Unicode country/region code (CLDR) of the location where the request is coming from. This parameter is used to display the place details, like region-specific place name, if available. The parameter can affect results based on applicable law. For more information, see https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. 
Note that 3-digit region codes are not currently supported.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "GoogleMapsPlacesV1Place" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/maps-platform.places", + "https://www.googleapis.com/auth/maps-platform.places.details" + ] + }, + "searchNearby": { + "description": "Search for places near locations.", + "flatPath": "v1/places:searchNearby", + "httpMethod": "POST", + "id": "places.places.searchNearby", + "parameterOrder": [], + "parameters": {}, + "path": "v1/places:searchNearby", + "request": { + "$ref": "GoogleMapsPlacesV1SearchNearbyRequest" + }, + "response": { + "$ref": "GoogleMapsPlacesV1SearchNearbyResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/maps-platform.places", + "https://www.googleapis.com/auth/maps-platform.places.nearbysearch" + ] + }, "searchText": { "description": "Text query based place search.", "flatPath": "v1/places:searchText", @@ -133,10 +196,59 @@ "https://www.googleapis.com/auth/maps-platform.places.textsearch" ] } + }, + "resources": { + "photos": { + "methods": { + "getMedia": { + "description": "Get a photo media with a photo reference string.", + "flatPath": "v1/places/{placesId}/photos/{photosId}/media", + "httpMethod": "GET", + "id": "places.places.photos.getMedia", + "parameterOrder": [ + "name" + ], + "parameters": { + "maxHeightPx": { + "description": "Optional. Specifies the maximum desired height, in pixels, of the image. If the image is smaller than the values specified, the original image will be returned. If the image is larger in either dimension, it will be scaled to match the smaller of the two dimensions, restricted to its original aspect ratio. Both the max_height_px and max_width_px properties accept an integer between 1 and 4800, inclusively. If the value is not within the allowed range, an INVALID_ARGUMENT error will be returned. At least one of max_height_px or max_width_px needs to be specified. If neither max_height_px nor max_width_px is specified, an INVALID_ARGUMENT error will be returned.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "maxWidthPx": { + "description": "Optional. Specifies the maximum desired width, in pixels, of the image. If the image is smaller than the values specified, the original image will be returned. If the image is larger in either dimension, it will be scaled to match the smaller of the two dimensions, restricted to its original aspect ratio. Both the max_height_px and max_width_px properties accept an integer between 1 and 4800, inclusively. If the value is not within the allowed range, an INVALID_ARGUMENT error will be returned. At least one of max_height_px or max_width_px needs to be specified. If neither max_height_px nor max_width_px is specified, an INVALID_ARGUMENT error will be returned.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "name": { + "description": "Required. The resource name of a photo media in the format: `places/place_id/photos/photo_reference/media`. The resource name of a photo as returned in a Place object's photos.name field comes with the format `places/place_id/photos/photo_reference`. 
You need to append `/media` at the end of the photo resource to get the photo media resource name.", + "location": "path", + "pattern": "^places/[^/]+/photos/[^/]+/media$", + "required": true, + "type": "string" + }, + "skipHttpRedirect": { + "description": "Optional. If set, skip the default HTTP redirect behavior and render a text format (for example, in JSON format for HTTP use case) response. If not set, an HTTP redirect will be issued to redirect the call to the image midea. This option is ignored for non-HTTP requests.", + "location": "query", + "type": "boolean" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "GoogleMapsPlacesV1PhotoMedia" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/maps-platform.places" + ] + } + } + } } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://places.googleapis.com/", "schemas": { "GoogleGeoTypeViewport": { @@ -159,18 +271,15 @@ "id": "GoogleMapsPlacesV1AuthorAttribution", "properties": { "displayName": { - "description": "Output only. Name of the author of the Photo or Review.", - "readOnly": true, + "description": "Name of the author of the Photo or Review.", "type": "string" }, "photoUri": { - "description": "Output only. Profile photo URI of the author of the Photo or Review.", - "readOnly": true, + "description": "Profile photo URI of the author of the Photo or Review.", "type": "string" }, "uri": { - "description": "Output only. URI of the author of the Photo or Review.", - "readOnly": true, + "description": "URI of the author of the Photo or Review.", "type": "string" } }, @@ -192,6 +301,204 @@ }, "type": "object" }, + "GoogleMapsPlacesV1EVChargeOptions": { + "description": "Information about the EV Charge Station hosted in Place. Terminology follows https://afdc.energy.gov/fuels/electricity_infrastructure.html One port could charge one car at a time. One port has one or more connectors. One station has one or more ports.", + "id": "GoogleMapsPlacesV1EVChargeOptions", + "properties": { + "connectorAggregation": { + "description": "A list of EV charging connector aggregations that contain connectors of the same type and same charge rate.", + "items": { + "$ref": "GoogleMapsPlacesV1EVChargeOptionsConnectorAggregation" + }, + "type": "array" + }, + "connectorCount": { + "description": "Number of connectors at this station. However, because some ports can have multiple connectors but only be able to charge one car at a time (e.g.) the number of connectors may be greater than the total number of cars which can charge simultaneously.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1EVChargeOptionsConnectorAggregation": { + "description": "EV charging information grouped by [type, max_charge_rate_kw]. 
Shows EV charge aggregation of connectors that have the same type and max charge rate in kw.", + "id": "GoogleMapsPlacesV1EVChargeOptionsConnectorAggregation", + "properties": { + "availabilityLastUpdateTime": { + "description": "The timestamp when the connector availability information in this aggregation was last updated.", + "format": "google-datetime", + "type": "string" + }, + "availableCount": { + "description": "Number of connectors in this aggregation that are currently available.", + "format": "int32", + "type": "integer" + }, + "count": { + "description": "Number of connectors in this aggregation.", + "format": "int32", + "type": "integer" + }, + "maxChargeRateKw": { + "description": "The static max charging rate in kw of each connector in the aggregation.", + "format": "double", + "type": "number" + }, + "outOfServiceCount": { + "description": "Number of connectors in this aggregation that are currently out of service.", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "The connector type of this aggregation.", + "enum": [ + "EV_CONNECTOR_TYPE_UNSPECIFIED", + "EV_CONNECTOR_TYPE_OTHER", + "EV_CONNECTOR_TYPE_J1772", + "EV_CONNECTOR_TYPE_TYPE_2", + "EV_CONNECTOR_TYPE_CHADEMO", + "EV_CONNECTOR_TYPE_CCS_COMBO_1", + "EV_CONNECTOR_TYPE_CCS_COMBO_2", + "EV_CONNECTOR_TYPE_TESLA", + "EV_CONNECTOR_TYPE_UNSPECIFIED_GB_T", + "EV_CONNECTOR_TYPE_UNSPECIFIED_WALL_OUTLET" + ], + "enumDescriptions": [ + "Unspecified connector.", + "Other connector types.", + "J1772 type 1 connector.", + "IEC 62196 type 2 connector. Often referred to as MENNEKES.", + "CHAdeMO type connector.", + "Combined Charging System (AC and DC). Based on SAE. Type-1 J-1772 connector", + "Combined Charging System (AC and DC). Based on Type-2 Mennekes connector", + "The generic TESLA connector. This is NACS in the North America but can be non-NACS in other parts of the world (e.g. CCS Combo 2 (CCS2) or GB/T). This value is less representative of an actual connector type, and more represents the ability to charge a Tesla brand vehicle at a Tesla owned charging station.", + "GB/T type corresponds to the GB/T standard in China. This type covers all GB_T types.", + "Unspecified wall outlet." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1FuelOptions": { + "description": "The most recent information about fuel options in a gas station. This information is updated regularly.", + "id": "GoogleMapsPlacesV1FuelOptions", + "properties": { + "fuelPrices": { + "description": "The last known fuel price for each type of fuel this station has. There is one entry per fuel type this station has. Order is not important.", + "items": { + "$ref": "GoogleMapsPlacesV1FuelOptionsFuelPrice" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1FuelOptionsFuelPrice": { + "description": "Fuel price information for a given type.", + "id": "GoogleMapsPlacesV1FuelOptionsFuelPrice", + "properties": { + "price": { + "$ref": "GoogleTypeMoney", + "description": "The price of the fuel." 
+ }, + "type": { + "description": "The type of fuel.", + "enum": [ + "FUEL_TYPE_UNSPECIFIED", + "DIESEL", + "REGULAR_UNLEADED", + "MIDGRADE", + "PREMIUM", + "SP91", + "SP91_E10", + "SP92", + "SP95", + "SP95_E10", + "SP98", + "SP99", + "SP100", + "LPG", + "E80", + "E85", + "METHANE", + "BIO_DIESEL", + "TRUCK_DIESEL" + ], + "enumDescriptions": [ + "Unspecified fuel type.", + "Diesel fuel.", + "Regular unleaded.", + "Midgrade.", + "Premium.", + "SP 91.", + "SP 91 E10.", + "SP 92.", + "SP 95.", + "SP95 E10.", + "SP 98.", + "SP 99.", + "SP 100.", + "LPG.", + "E 80.", + "E 85.", + "Methane.", + "Bio-diesel.", + "Truck diesel." + ], + "type": "string" + }, + "updateTime": { + "description": "The time the fuel price was last updated.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1Photo": { + "description": "Information about a photo of a place.", + "id": "GoogleMapsPlacesV1Photo", + "properties": { + "authorAttributions": { + "description": "This photo's authors.", + "items": { + "$ref": "GoogleMapsPlacesV1AuthorAttribution" + }, + "type": "array" + }, + "heightPx": { + "description": "The maximum available height, in pixels.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "Identifier. A reference representing this place photo which may be used to look up this place photo again (a.k.a. the API \"resource\" name: places/{place_id}/photos/{photo}).", + "type": "string" + }, + "widthPx": { + "description": "The maximum available width, in pixels.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1PhotoMedia": { + "description": "A photo media from Places API.", + "id": "GoogleMapsPlacesV1PhotoMedia", + "properties": { + "name": { + "description": "The resource name of a photo media in the format: `places/place_id/photos/photo_reference/media`.", + "type": "string" + }, + "photoUri": { + "description": "A short-lived uri that can be used to render the photo.", + "type": "string" + } + }, + "type": "object" + }, "GoogleMapsPlacesV1Place": { "description": "All the information representing a Place.", "id": "GoogleMapsPlacesV1Place", @@ -211,6 +518,10 @@ "description": "The place's address in adr microformat: http://microformats.org/wiki/adr.", "type": "string" }, + "allowsDogs": { + "description": "Place allows dogs.", + "type": "boolean" + }, "attributions": { "description": "A set of data provider that must be shown with this result.", "items": { @@ -265,10 +576,30 @@ "$ref": "GoogleTypeLocalizedText", "description": "Contains a summary of the place. A summary is comprised of a textual overview, and also includes the language code for these if applicable. Summary text must be presented as-is and can not be modified or altered." }, + "evChargeOptions": { + "$ref": "GoogleMapsPlacesV1EVChargeOptions", + "description": "Information of ev charging options." + }, "formattedAddress": { "description": "A full, human-readable address for this place.", "type": "string" }, + "fuelOptions": { + "$ref": "GoogleMapsPlacesV1FuelOptions", + "description": "The most recent information about fuel options in a gas station. This information is updated regularly." 
+ }, + "goodForChildren": { + "description": "Place is good for children.", + "type": "boolean" + }, + "goodForGroups": { + "description": "Place accommodates groups.", + "type": "boolean" + }, + "goodForWatchingSports": { + "description": "Place is suitable for watching sports.", + "type": "boolean" + }, "googleMapsUri": { "description": "A URL providing more information about this place.", "type": "string" @@ -278,7 +609,7 @@ "type": "string" }, "iconMaskBaseUri": { - "description": "A truncated URL to an v2 icon mask. User can access different icon type by appending type suffix to the end (eg, \".svg\" or \".png\").", + "description": "A truncated URL to an icon mask. User can access different icon type by appending type suffix to the end (eg, \".svg\" or \".png\").", "type": "string" }, "id": { @@ -289,10 +620,18 @@ "description": "A human-readable phone number for the place, in international format.", "type": "string" }, + "liveMusic": { + "description": "Place provides live music.", + "type": "boolean" + }, "location": { "$ref": "GoogleTypeLatLng", "description": "The position of this place." }, + "menuForChildren": { + "description": "Place has a children's menu.", + "type": "boolean" + }, "name": { "description": "An ID representing this place which may be used to look up this place again (a.k.a. the API \"resource\" name: places/place_id).", "type": "string" @@ -301,6 +640,25 @@ "description": "A human-readable phone number for the place, in national format.", "type": "string" }, + "outdoorSeating": { + "description": "Place provides outdoor seating.", + "type": "boolean" + }, + "parkingOptions": { + "$ref": "GoogleMapsPlacesV1PlaceParkingOptions", + "description": "Options of parking provided by the place." + }, + "paymentOptions": { + "$ref": "GoogleMapsPlacesV1PlacePaymentOptions", + "description": "Payment options the place accepts. If a payment option data is not available, the payment option field will be unset." + }, + "photos": { + "description": "Information (including references) about photos of this place.", + "items": { + "$ref": "GoogleMapsPlacesV1Photo" + }, + "type": "array" + }, "plusCode": { "$ref": "GoogleMapsPlacesV1PlacePlusCode", "description": "Plus code of the place location lat/long." @@ -325,6 +683,14 @@ ], "type": "string" }, + "primaryType": { + "description": "The primary type of the given result. This type must one of the Places API supported types. For example, \"restaurant\", \"cafe\", \"airport\", etc. A place can only have a single primary type. For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types", + "type": "string" + }, + "primaryTypeDisplayName": { + "$ref": "GoogleTypeLocalizedText", + "description": "The display name of the primary type, localized to the request language if applicable. 
For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types" + }, "rating": { "description": "A rating between 1.0 and 5.0, based on user reviews of this place.", "format": "double", @@ -345,8 +711,12 @@ "description": "Specifies if the place supports reservations.", "type": "boolean" }, + "restroom": { + "description": "Place has restroom.", + "type": "boolean" + }, "reviews": { - "description": "List of reviews about this place.", + "description": "List of reviews about this place, sorted by relevance.", "items": { "$ref": "GoogleMapsPlacesV1Review" }, @@ -364,6 +734,18 @@ "description": "Specifies if the place serves brunch.", "type": "boolean" }, + "servesCocktails": { + "description": "Place serves cocktails.", + "type": "boolean" + }, + "servesCoffee": { + "description": "Place serves coffee.", + "type": "boolean" + }, + "servesDessert": { + "description": "Place serves dessert.", + "type": "boolean" + }, "servesDinner": { "description": "Specifies if the place serves dinner.", "type": "boolean" @@ -380,12 +762,23 @@ "description": "Specifies if the place serves wine.", "type": "boolean" }, + "shortFormattedAddress": { + "description": "A short, human-readable address for this place.", + "type": "string" + }, + "subDestinations": { + "description": "A list of sub destinations related to the place.", + "items": { + "$ref": "GoogleMapsPlacesV1PlaceSubDestination" + }, + "type": "array" + }, "takeout": { "description": "Specifies if the business supports takeout.", "type": "boolean" }, "types": { - "description": "A set of type tags for this result. For example, \"political\" and \"locality\". See: https://developers.google.com/maps/documentation/places/web-service/place-types", + "description": "A set of type tags for this result. For example, \"political\" and \"locality\". For the complete list of possible values, see Table A and Table B at https://developers.google.com/maps/documentation/places/web-service/place-types", "items": { "type": "string" }, @@ -419,6 +812,18 @@ "wheelchairAccessibleEntrance": { "description": "Places has wheelchair accessible entrance.", "type": "boolean" + }, + "wheelchairAccessibleParking": { + "description": "Place offers wheelchair accessible parking.", + "type": "boolean" + }, + "wheelchairAccessibleRestroom": { + "description": "Place has wheelchair accessible restroom.", + "type": "boolean" + }, + "wheelchairAccessibleSeating": { + "description": "Place has wheelchair accessible seating.", + "type": "boolean" } }, "type": "object" @@ -588,6 +993,64 @@ }, "type": "object" }, + "GoogleMapsPlacesV1PlaceParkingOptions": { + "description": "Information about parking options for the place. 
A parking lot could support more than one option at the same time.", + "id": "GoogleMapsPlacesV1PlaceParkingOptions", + "properties": { + "freeGarageParking": { + "description": "Place offers free garage parking.", + "type": "boolean" + }, + "freeParkingLot": { + "description": "Place offers free parking lots.", + "type": "boolean" + }, + "freeStreetParking": { + "description": "Place offers free street parking.", + "type": "boolean" + }, + "paidGarageParking": { + "description": "Place offers paid garage parking.", + "type": "boolean" + }, + "paidParkingLot": { + "description": "Place offers paid parking lots.", + "type": "boolean" + }, + "paidStreetParking": { + "description": "Place offers paid street parking.", + "type": "boolean" + }, + "valetParking": { + "description": "Place offers valet parking.", + "type": "boolean" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1PlacePaymentOptions": { + "description": "Payment options the place accepts.", + "id": "GoogleMapsPlacesV1PlacePaymentOptions", + "properties": { + "acceptsCashOnly": { + "description": "Place accepts cash only as payment. Places with this attribute may still accept other payment methods.", + "type": "boolean" + }, + "acceptsCreditCards": { + "description": "Place accepts credit cards as payment.", + "type": "boolean" + }, + "acceptsDebitCards": { + "description": "Place accepts debit cards as payment.", + "type": "boolean" + }, + "acceptsNfc": { + "description": "Place accepts NFC payments.", + "type": "boolean" + } + }, + "type": "object" + }, "GoogleMapsPlacesV1PlacePlusCode": { "description": "Plus code (http://plus.codes) is a location reference with two formats: global code defining a 14mx14m (1/8000th of a degree) or smaller rectangle, and compound code, replacing the prefix with a reference location.", "id": "GoogleMapsPlacesV1PlacePlusCode", @@ -603,41 +1066,145 @@ }, "type": "object" }, + "GoogleMapsPlacesV1PlaceSubDestination": { + "description": "Place resource name and id of sub destinations that relate to the place. For example, different terminals are different destinations of an airport.", + "id": "GoogleMapsPlacesV1PlaceSubDestination", + "properties": { + "id": { + "description": "The place id of the sub destination.", + "type": "string" + }, + "name": { + "description": "The resource name of the sub destination.", + "type": "string" + } + }, + "type": "object" + }, "GoogleMapsPlacesV1Review": { "description": "Information about a review of a place.", "id": "GoogleMapsPlacesV1Review", "properties": { "authorAttribution": { "$ref": "GoogleMapsPlacesV1AuthorAttribution", - "description": "Output only. This review's author.", - "readOnly": true + "description": "This review's author." + }, + "name": { + "description": "A reference representing this place review which may be used to look up this place review again (also called the API \"resource\" name: places/place_id/reviews/review).", + "type": "string" }, "originalText": { "$ref": "GoogleTypeLocalizedText", - "description": "Output only. The review text in its original language.", - "readOnly": true + "description": "The review text in its original language." }, "publishTime": { - "description": "Output only. Timestamp for the review.", + "description": "Timestamp for the review.", "format": "google-datetime", - "readOnly": true, "type": "string" }, "rating": { - "description": "Output only. A number between 1.0 and 5.0, a.k.a. 
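For reference (not part of the generated artifact): a minimal sketch of reading the new amenity, parking, and payment fields with this client library. The place ID and field mask are illustrative, it is assumed that the discovery surface exposes `places.get` in this revision, and Places API (New) calls also require a response field mask, supplied here via the request headers.

```python
from googleapiclient.discovery import build

# Build the Places API (New) client; an API key with the Places API
# enabled is assumed.
places = build("places", "v1", developerKey="YOUR_API_KEY")

# Sample place ID (placeholder) and a field mask listing some of the
# fields added in this revision.
request = places.places().get(name="places/ChIJj61dQgK6j4AR4GeTYWZsKWw")
request.headers["X-Goog-FieldMask"] = (
    "id,displayName,goodForChildren,outdoorSeating,"
    "parkingOptions,paymentOptions,wheelchairAccessibleRestroom"
)
place = request.execute()
print(place.get("parkingOptions"), place.get("paymentOptions"))
```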
the number of stars.", + "description": "A number between 1.0 and 5.0, also called the number of stars.", "format": "double", - "readOnly": true, "type": "number" }, "relativePublishTimeDescription": { - "description": "Output only. A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.", - "readOnly": true, + "description": "A string of formatted recent time, expressing the review time relative to the current time in a form appropriate for the language and country.", "type": "string" }, "text": { "$ref": "GoogleTypeLocalizedText", - "description": "Output only. The localized text of the review.", - "readOnly": true + "description": "The localized text of the review." + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1SearchNearbyRequest": { + "description": "Request proto for Search Nearby. ", + "id": "GoogleMapsPlacesV1SearchNearbyRequest", + "properties": { + "excludedPrimaryTypes": { + "description": "Excluded primary Place type (e.g. \"restaurant\" or \"gas_station\") from https://developers.google.com/maps/documentation/places/web-service/place-types. If there are any conflicting primary types, i.e. a type appears in both included_primary_types and excluded_primary_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = [\"restaurant\"], excluded_primary_types = [\"restaurant\"]}, the returned places provide \"restaurant\" related services but do not operate primarily as \"restaurants\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "excludedTypes": { + "description": "Excluded Place type (eg, \"restaurant\" or \"gas_station\") from https://developers.google.com/maps/documentation/places/web-service/place-types. If the client provides both included_types (e.g. restaurant) and excluded_types (e.g. cafe), then the response should include places that are restaurant but not cafe. The response includes places that match at least one of the included_types and none of the excluded_types. If there are any conflicting types, i.e. a type appears in both included_types and excluded_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = [\"restaurant\"], excluded_primary_types = [\"restaurant\"]}, the returned places provide \"restaurant\" related services but do not operate primarily as \"restaurants\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "includedPrimaryTypes": { + "description": "Included primary Place type (e.g. \"restaurant\" or \"gas_station\") from https://developers.google.com/maps/documentation/places/web-service/place-types. A place can only have a single primary type from the supported types table associated with it. If there are any conflicting primary types, i.e. a type appears in both included_primary_types and excluded_primary_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. 
For example, if we have {included_types = [\"restaurant\"], excluded_primary_types = [\"restaurant\"]}, the returned places provide \"restaurant\" related services but do not operate primarily as \"restaurants\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "includedTypes": { + "description": "Included Place type (eg, \"restaurant\" or \"gas_station\") from https://developers.google.com/maps/documentation/places/web-service/place-types. If there are any conflicting types, i.e. a type appears in both included_types and excluded_types, an INVALID_ARGUMENT error is returned. If a Place type is specified with multiple type restrictions, only places that satisfy all of the restrictions are returned. For example, if we have {included_types = [\"restaurant\"], excluded_primary_types = [\"restaurant\"]}, the returned places provide \"restaurant\" related services but do not operate primarily as \"restaurants\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "languageCode": { + "description": "Place details will be displayed with the preferred language if available. If the language code is unspecified or unrecognized, place details of any language may be returned, with a preference for English if such details exist. Current list of supported languages: https://developers.google.com/maps/faq#languagesupport.", + "type": "string" + }, + "locationRestriction": { + "$ref": "GoogleMapsPlacesV1SearchNearbyRequestLocationRestriction", + "description": "Required. The region to search." + }, + "maxResultCount": { + "description": "Maximum number of results to return. It must be between 1 and 20 (default), inclusively. If the number is unset, it falls back to the upper limit. If the number is set to negative or exceeds the upper limit, an INVALID_ARGUMENT error is returned.", + "format": "int32", + "type": "integer" + }, + "rankPreference": { + "description": "How results will be ranked in the response.", + "enum": [ + "RANK_PREFERENCE_UNSPECIFIED", + "DISTANCE", + "POPULARITY" + ], + "enumDescriptions": [ + "RankPreference value not set. Will use rank by POPULARITY by default.", + "Ranks results by distance.", + "Ranks results by popularity." + ], + "type": "string" + }, + "regionCode": { + "description": "The Unicode country/region code (CLDR) of the location where the request is coming from. This parameter is used to display the place details, like region-specific place name, if available. The parameter can affect results based on applicable law. For more information, see https://www.unicode.org/cldr/charts/latest/supplemental/territory_language_information.html. Note that 3-digit region codes are not currently supported.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1SearchNearbyRequestLocationRestriction": { + "description": "The region to search.", + "id": "GoogleMapsPlacesV1SearchNearbyRequestLocationRestriction", + "properties": { + "circle": { + "$ref": "GoogleMapsPlacesV1Circle", + "description": "A circle defined by center point and radius." + } + }, + "type": "object" + }, + "GoogleMapsPlacesV1SearchNearbyResponse": { + "description": "Response proto for Search Nearby. 
", + "id": "GoogleMapsPlacesV1SearchNearbyResponse", + "properties": { + "places": { + "description": "A list of places that meets user's requirements like places types, number of places and specific location restriction.", + "items": { + "$ref": "GoogleMapsPlacesV1Place" + }, + "type": "array" } }, "type": "object" @@ -663,7 +1230,7 @@ "description": "The region to search. This location serves as a restriction which means results outside given location will not be returned. Cannot be set along with location_bias." }, "maxResultCount": { - "description": "Maximum number of results to return. It must be between 1 and 20, inclusively. If the number is unset, it falls back to the upper limit. If the number is set to negative or exceeds the upper limit, an INVALID_ARGUMENT error is returned.", + "description": "Maximum number of results to return. It must be between 1 and 20, inclusively. The default is 20. If the number is unset, it falls back to the upper limit. If the number is set to negative or exceeds the upper limit, an INVALID_ARGUMENT error is returned.", "format": "int32", "type": "integer" }, @@ -673,7 +1240,7 @@ "type": "number" }, "openNow": { - "description": "Used to restrict the search to places that are currently open.", + "description": "Used to restrict the search to places that are currently open. The default is false.", "type": "boolean" }, "priceLevels": { @@ -821,6 +1388,27 @@ } }, "type": "object" + }, + "GoogleTypeMoney": { + "description": "Represents an amount of money with its currency type.", + "id": "GoogleTypeMoney", + "properties": { + "currencyCode": { + "description": "The three-letter currency code defined in ISO 4217.", + "type": "string" + }, + "nanos": { + "description": "Number of nano (10^-9) units of the amount. The value must be between -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` must be positive or zero. If `units` is zero, `nanos` can be positive, zero, or negative. If `units` is negative, `nanos` must be negative or zero. For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000.", + "format": "int32", + "type": "integer" + }, + "units": { + "description": "The whole units of the amount. 
For example if `currencyCode` is `\"USD\"`, then 1 unit is one US dollar.", + "format": "int64", + "type": "string" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json index e6e5e7a7bc7..b0617c04cba 100644 --- a/googleapiclient/discovery_cache/documents/playcustomapp.v1.json +++ b/googleapiclient/discovery_cache/documents/playcustomapp.v1.json @@ -158,7 +158,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://playcustomapp.googleapis.com/", "schemas": { "CustomApp": { diff --git a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json index 877d6e10b83..a97e0b9835f 100644 --- a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json @@ -941,7 +941,7 @@ } } }, - "revision": "20231023", + "revision": "20231029", "rootUrl": "https://playdeveloperreporting.googleapis.com/", "schemas": { "GooglePlayDeveloperReportingV1alpha1Anomaly": { diff --git a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json index 5dfec0d77cd..932c3b34a18 100644 --- a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json @@ -941,7 +941,7 @@ } } }, - "revision": "20231023", + "revision": "20231029", "rootUrl": "https://playdeveloperreporting.googleapis.com/", "schemas": { "GooglePlayDeveloperReportingV1beta1Anomaly": { diff --git a/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json b/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json index a0dc2eeebb0..daa60dc49d8 100644 --- a/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/playgrouping.v1alpha1.json @@ -177,7 +177,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://playgrouping.googleapis.com/", "schemas": { "CreateOrUpdateTagsRequest": { diff --git a/googleapiclient/discovery_cache/documents/playintegrity.v1.json b/googleapiclient/discovery_cache/documents/playintegrity.v1.json index 96249bdb914..f84f14e2538 100644 --- a/googleapiclient/discovery_cache/documents/playintegrity.v1.json +++ b/googleapiclient/discovery_cache/documents/playintegrity.v1.json @@ -138,7 +138,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://playintegrity.googleapis.com/", "schemas": { "AccountActivity": { diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json index 0967a11baa7..8538624a18d 100644 --- a/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json +++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1.json @@ -163,7 +163,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://policyanalyzer.googleapis.com/", "schemas": { "GoogleCloudPolicyanalyzerV1Activity": { diff --git a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json index d254295e273..3aaf09de40c 100644 --- 
a/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/policyanalyzer.v1beta1.json @@ -163,7 +163,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://policyanalyzer.googleapis.com/", "schemas": { "GoogleCloudPolicyanalyzerV1beta1Activity": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1.json b/googleapiclient/discovery_cache/documents/policysimulator.v1.json index fae84244508..5750e668936 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1.json +++ b/googleapiclient/discovery_cache/documents/policysimulator.v1.json @@ -707,7 +707,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudOrgpolicyV2AlternatePolicySpec": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1alpha.json b/googleapiclient/discovery_cache/documents/policysimulator.v1alpha.json index 6afb2abc053..cf1b289e18d 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/policysimulator.v1alpha.json @@ -1047,7 +1047,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudOrgpolicyV2AlternatePolicySpec": { diff --git a/googleapiclient/discovery_cache/documents/policysimulator.v1beta.json b/googleapiclient/discovery_cache/documents/policysimulator.v1beta.json index 3cd8428df31..10b1654e920 100644 --- a/googleapiclient/discovery_cache/documents/policysimulator.v1beta.json +++ b/googleapiclient/discovery_cache/documents/policysimulator.v1beta.json @@ -1047,7 +1047,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://policysimulator.googleapis.com/", "schemas": { "GoogleCloudOrgpolicyV2AlternatePolicySpec": { diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json index 51279b4a448..b6ae254bc32 100644 --- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json +++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1.json @@ -128,7 +128,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://policytroubleshooter.googleapis.com/", "schemas": { "GoogleCloudPolicytroubleshooterV1AccessTuple": { diff --git a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json index efc706fb2be..424dd30aa48 100644 --- a/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json +++ b/googleapiclient/discovery_cache/documents/policytroubleshooter.v1beta.json @@ -128,7 +128,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://policytroubleshooter.googleapis.com/", "schemas": { "GoogleCloudPolicytroubleshooterV1betaAccessTuple": { diff --git a/googleapiclient/discovery_cache/documents/privateca.v1.json b/googleapiclient/discovery_cache/documents/privateca.v1.json index be876261627..d7e58d1ca29 100644 --- a/googleapiclient/discovery_cache/documents/privateca.v1.json +++ b/googleapiclient/discovery_cache/documents/privateca.v1.json @@ -1605,7 +1605,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://privateca.googleapis.com/", "schemas": { "AccessUrls": { diff --git 
a/googleapiclient/discovery_cache/documents/privateca.v1beta1.json b/googleapiclient/discovery_cache/documents/privateca.v1beta1.json index 6317ca77ad2..64eb54503a5 100644 --- a/googleapiclient/discovery_cache/documents/privateca.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/privateca.v1beta1.json @@ -580,7 +580,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://privateca.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json index 5804f1c63ed..215b59256d8 100644 --- a/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/prod_tt_sasportal.v1alpha1.json @@ -2637,7 +2637,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://prod-tt-sasportal.googleapis.com/", "schemas": { "SasPortalAssignment": { diff --git a/googleapiclient/discovery_cache/documents/publicca.v1.json b/googleapiclient/discovery_cache/documents/publicca.v1.json index ae5939b79df..dba8a16d60b 100644 --- a/googleapiclient/discovery_cache/documents/publicca.v1.json +++ b/googleapiclient/discovery_cache/documents/publicca.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231016", + "revision": "20231024", "rootUrl": "https://publicca.googleapis.com/", "schemas": { "ExternalAccountKey": { diff --git a/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json b/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json index a1dcc47073e..24aad0debc1 100644 --- a/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/publicca.v1alpha1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231016", + "revision": "20231024", "rootUrl": "https://publicca.googleapis.com/", "schemas": { "ExternalAccountKey": { diff --git a/googleapiclient/discovery_cache/documents/publicca.v1beta1.json b/googleapiclient/discovery_cache/documents/publicca.v1beta1.json index 2775e2532fa..431a3c126f7 100644 --- a/googleapiclient/discovery_cache/documents/publicca.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/publicca.v1beta1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231016", + "revision": "20231024", "rootUrl": "https://publicca.googleapis.com/", "schemas": { "ExternalAccountKey": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1.json b/googleapiclient/discovery_cache/documents/pubsub.v1.json index d69034253bc..c745907b44a 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1.json @@ -164,7 +164,7 @@ "type": "string" }, "schemaId": { - "description": "The ID to use for the schema, which will become the final component of the schema's resource name. See https://cloud.google.com/pubsub/docs/admin#resource_names for resource name constraints.", + "description": "The ID to use for the schema, which will become the final component of the schema's resource name. See https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names for resource name constraints.", "location": "query", "type": "string" } @@ -566,7 +566,7 @@ "snapshots": { "methods": { "create": { - "description": "Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. 
That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.", + "description": "Creates a snapshot from the requested subscription. Snapshots are used in [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, which allow you to manage message acknowledgments in bulk. That is, you can set the acknowledgment state of messages in an existing subscription to the state captured by a snapshot. If the snapshot already exists, returns `ALREADY_EXISTS`. If the requested subscription doesn't exist, returns `NOT_FOUND`. If the backlog in the subscription is too old -- and the resulting snapshot would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. See also the `Snapshot.expire_time` field. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The generated name is populated in the returned Snapshot object. Note that for REST API requests, you must specify a name in the request.", "flatPath": "v1/projects/{projectsId}/snapshots/{snapshotsId}", "httpMethod": "PUT", "id": "pubsub.projects.snapshots.create", @@ -575,7 +575,7 @@ ], "parameters": { "name": { - "description": "Required. User-provided name for this snapshot. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription. Note that for REST API requests, you must specify a name. See the [resource name rules](https://cloud.google.com/pubsub/docs/admin#resource_names). Format is `projects/{project}/snapshots/{snap}`.", + "description": "Required. User-provided name for this snapshot. If the name is not provided in the request, the server will assign a random name for this snapshot on the same project as the subscription. Note that for REST API requests, you must specify a name. See the [resource name rules](https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). Format is `projects/{project}/snapshots/{snap}`.", "location": "path", "pattern": "^projects/[^/]+/snapshots/[^/]+$", "required": true, @@ -836,7 +836,7 @@ ] }, "create": { - "description": "Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/admin#resource_names). 
The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.", + "description": "Creates a subscription to a given topic. See the [resource name rules] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). If the subscription already exists, returns `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is not provided in the request, the server will assign a random name for this subscription on the same project as the topic, conforming to the [resource name format] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names). The generated name is populated in the returned Subscription object. Note that for REST API requests, you must specify a name in the request.", "flatPath": "v1/projects/{projectsId}/subscriptions/{subscriptionsId}", "httpMethod": "PUT", "id": "pubsub.projects.subscriptions.create", @@ -1219,7 +1219,7 @@ "topics": { "methods": { "create": { - "description": "Creates the given topic with the given name. See the [resource name rules] (https://cloud.google.com/pubsub/docs/admin#resource_names).", + "description": "Creates the given topic with the given name. See the [resource name rules] (https://cloud.google.com/pubsub/docs/pubsub-basics#resource_names).", "flatPath": "v1/projects/{projectsId}/topics/{topicsId}", "httpMethod": "PUT", "id": "pubsub.projects.topics.create", @@ -1573,7 +1573,7 @@ } } }, - "revision": "20231010", + "revision": "20231024", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { @@ -1946,7 +1946,7 @@ "id": "MessageStoragePolicy", "properties": { "allowedPersistenceRegions": { - "description": "Optional. A list of IDs of GCP regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed GCP regions (or running outside of GCP altogether) will be routed for storage in one of the allowed regions. An empty list means that no regions are allowed, and is not a valid configuration.", + "description": "Optional. A list of IDs of Google Cloud regions where messages that are published to the topic may be persisted in storage. Messages published by publishers running in non-allowed Google Cloud regions (or running outside of Google Cloud altogether) are routed for storage in one of the allowed regions. 
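For reference: a minimal sketch of the snapshot workflow whose documentation links are updated above (`snapshots.create` appears in this diff; `subscriptions.seek` is part of the existing surface). The project, subscription, and snapshot names are placeholders and must follow the linked resource-name rules; Application Default Credentials are assumed.

```python
from googleapiclient.discovery import build

pubsub = build("pubsub", "v1")  # uses Application Default Credentials

project = "projects/my-project"                   # placeholder
subscription = f"{project}/subscriptions/my-sub"  # placeholder
snapshot = f"{project}/snapshots/my-snapshot"     # placeholder

# Capture the subscription's current acknowledgment state
# (PUT v1/projects/{project}/snapshots/{snapshot}).
pubsub.projects().snapshots().create(
    name=snapshot, body={"subscription": subscription}
).execute()

# Replay: move the subscription back to the captured state.
pubsub.projects().subscriptions().seek(
    subscription=subscription, body={"snapshot": snapshot}
).execute()
```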
An empty list means that no regions are allowed, and is not a valid configuration.", "items": { "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json index d903f3c95da..a5b75cf0792 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta1a.json @@ -464,7 +464,7 @@ } } }, - "revision": "20231010", + "revision": "20231024", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json index 8c5e6084d7f..9f0aab3b0e2 100644 --- a/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/pubsub.v1beta2.json @@ -731,7 +731,7 @@ } } }, - "revision": "20231010", + "revision": "20231024", "rootUrl": "https://pubsub.googleapis.com/", "schemas": { "AcknowledgeRequest": { diff --git a/googleapiclient/discovery_cache/documents/pubsublite.v1.json b/googleapiclient/discovery_cache/documents/pubsublite.v1.json index 2aeff3ec23a..22133682e7d 100644 --- a/googleapiclient/discovery_cache/documents/pubsublite.v1.json +++ b/googleapiclient/discovery_cache/documents/pubsublite.v1.json @@ -1040,7 +1040,7 @@ } } }, - "revision": "20231006", + "revision": "20231020", "rootUrl": "https://pubsublite.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/rapidmigrationassessment.v1.json b/googleapiclient/discovery_cache/documents/rapidmigrationassessment.v1.json index 3d475208079..60ecf8bef16 100644 --- a/googleapiclient/discovery_cache/documents/rapidmigrationassessment.v1.json +++ b/googleapiclient/discovery_cache/documents/rapidmigrationassessment.v1.json @@ -633,7 +633,7 @@ } } }, - "revision": "20231013", + "revision": "20231021", "rootUrl": "https://rapidmigrationassessment.googleapis.com/", "schemas": { "Annotation": { diff --git a/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json b/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json index 14cc8c4aee4..d0460c2cdaf 100644 --- a/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json +++ b/googleapiclient/discovery_cache/documents/readerrevenuesubscriptionlinking.v1.json @@ -207,7 +207,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://readerrevenuesubscriptionlinking.googleapis.com/", "schemas": { "DeleteReaderResponse": { diff --git a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json index 3bbd58fef4b..b99c9199d1c 100644 --- a/googleapiclient/discovery_cache/documents/realtimebidding.v1.json +++ b/googleapiclient/discovery_cache/documents/realtimebidding.v1.json @@ -1305,7 +1305,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://realtimebidding.googleapis.com/", "schemas": { "ActivatePretargetingConfigRequest": { diff --git a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json index d820810c175..2bc86196874 100644 --- a/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json +++ b/googleapiclient/discovery_cache/documents/recaptchaenterprise.v1.json @@ -666,7 +666,7 @@ } } 
}, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://recaptchaenterprise.googleapis.com/", "schemas": { "GoogleCloudRecaptchaenterpriseV1AccountDefenderAssessment": { @@ -776,6 +776,10 @@ "description": "The request message to annotate an Assessment.", "id": "GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentRequest", "properties": { + "accountId": { + "description": "Optional. A stable account identifier to apply to the assessment. This is an alternative to setting `account_id` in `CreateAssessment`, for example when a stable account identifier is not yet known in the initial request.", + "type": "string" + }, "annotation": { "description": "Optional. The annotation that will be assigned to the Event. This field can be left empty to provide reasons that apply to an event without concluding whether the event is legitimate or fraudulent.", "enum": [ @@ -802,7 +806,7 @@ "type": "string" }, "hashedAccountId": { - "description": "Optional. Unique stable hashed user identifier to apply to the assessment. This is an alternative to setting the hashed_account_id in CreateAssessment, for example when the account identifier is not yet known in the initial request. It is recommended that the identifier is hashed using hmac-sha256 with stable secret.", + "description": "Optional. A stable hashed account identifier to apply to the assessment. This is an alternative to setting `hashed_account_id` in `CreateAssessment`, for example when a stable account identifier is not yet known in the initial request.", "format": "byte", "type": "string" }, @@ -885,7 +889,7 @@ "properties": { "accountDefenderAssessment": { "$ref": "GoogleCloudRecaptchaenterpriseV1AccountDefenderAssessment", - "description": "Output only. Assessment returned by account defender when a hashed_account_id is provided.", + "description": "Output only. Assessment returned by account defender when an account identifier is provided.", "readOnly": true }, "accountVerification": { @@ -1003,7 +1007,8 @@ "type": "boolean" }, "hashedAccountId": { - "description": "Optional. Unique stable hashed user identifier for the request. The identifier must be hashed using hmac-sha256 with stable secret.", + "deprecated": true, + "description": "Optional. Deprecated: use `user_info.account_id` instead. Unique stable hashed user identifier for the request. The identifier must be hashed using hmac-sha256 with stable secret.", "format": "byte", "type": "string" }, @@ -1038,6 +1043,10 @@ "description": "Optional. The user agent present in the request from the user's device related to this event.", "type": "string" }, + "userInfo": { + "$ref": "GoogleCloudRecaptchaenterpriseV1UserInfo", + "description": "Optional. Information about the user that generates this event, when they can be identified. They are often identified through the use of an account for logged-in requests or login/registration requests, or by providing user identifiers for guest actions like checkout." + }, "userIpAddress": { "description": "Optional. The IP address in the request from the user's device related to this event.", "type": "string" @@ -2007,6 +2016,48 @@ }, "type": "object" }, + "GoogleCloudRecaptchaenterpriseV1UserId": { + "description": "An identifier associated with a user.", + "id": "GoogleCloudRecaptchaenterpriseV1UserId", + "properties": { + "email": { + "description": "Optional. An email address.", + "type": "string" + }, + "phoneNumber": { + "description": "Optional. A phone number. 
Should use the E.164 format.", + "type": "string" + }, + "username": { + "description": "Optional. A unique username, if different from all the other identifiers and `account_id` that are provided. Can be a unique login handle or display name for a user.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudRecaptchaenterpriseV1UserInfo": { + "description": "User information associated with a request protected by reCAPTCHA Enterprise.", + "id": "GoogleCloudRecaptchaenterpriseV1UserInfo", + "properties": { + "accountId": { + "description": "Optional. For logged-in requests or login/registration requests, the unique account identifier associated with this user. You can use the username if it is stable (meaning it is the same for every request associated with the same user), or any stable user ID of your choice. Leave blank for non logged-in actions or guest checkout.", + "type": "string" + }, + "createAccountTime": { + "description": "Optional. Creation time for this account associated with this user. Leave blank for non logged-in actions, guest checkout, or when there is no account associated with the current user.", + "format": "google-datetime", + "type": "string" + }, + "userIds": { + "description": "Optional. Identifiers associated with this user or request.", + "items": { + "$ref": "GoogleCloudRecaptchaenterpriseV1UserId" + }, + "type": "array" + } + }, + "type": "object" + }, "GoogleCloudRecaptchaenterpriseV1WafSettings": { "description": "Settings specific to keys that can be used for WAF (Web Application Firewall).", "id": "GoogleCloudRecaptchaenterpriseV1WafSettings", diff --git a/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json b/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json index a3f37dfa50a..109f00c7638 100644 --- a/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/recommendationengine.v1beta1.json @@ -841,7 +841,7 @@ } } }, - "revision": "20231016", + "revision": "20231019", "rootUrl": "https://recommendationengine.googleapis.com/", "schemas": { "GoogleApiHttpBody": { diff --git a/googleapiclient/discovery_cache/documents/recommender.v1.json b/googleapiclient/discovery_cache/documents/recommender.v1.json index 988180abe99..fdeee5bba3a 100644 --- a/googleapiclient/discovery_cache/documents/recommender.v1.json +++ b/googleapiclient/discovery_cache/documents/recommender.v1.json @@ -1686,7 +1686,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://recommender.googleapis.com/", "schemas": { "GoogleCloudRecommenderV1CostProjection": { diff --git a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json index 1541b77a8c8..532036df1b4 100644 --- a/googleapiclient/discovery_cache/documents/recommender.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/recommender.v1beta1.json @@ -1748,7 +1748,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://recommender.googleapis.com/", "schemas": { "GoogleCloudRecommenderV1beta1CostProjection": { diff --git a/googleapiclient/discovery_cache/documents/redis.v1.json b/googleapiclient/discovery_cache/documents/redis.v1.json index ac8259785b3..2fc03d34a1e 100644 --- a/googleapiclient/discovery_cache/documents/redis.v1.json +++ b/googleapiclient/discovery_cache/documents/redis.v1.json @@ -821,7 +821,7 @@ } } }, - "revision": "20231017", + "revision": "20231019", 
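For reference: a minimal sketch of the new `userInfo` field on Event and the new `accountId` field on AnnotateAssessmentRequest shown above. The project, site key, token, and account identifiers are placeholders; Application Default Credentials are assumed.

```python
from googleapiclient.discovery import build

recaptcha = build("recaptchaenterprise", "v1")  # Application Default Credentials
parent = "projects/my-project"                  # placeholder

# Create an assessment and identify the user via the new userInfo field
# (the deprecated hashedAccountId is no longer needed).
assessment = recaptcha.projects().assessments().create(
    parent=parent,
    body={
        "event": {
            "token": "TOKEN_FROM_CLIENT",  # placeholder
            "siteKey": "YOUR_SITE_KEY",    # placeholder
            "userInfo": {
                "accountId": "stable-account-id-123",
                "userIds": [{"email": "user@example.com"}],
            },
        }
    },
).execute()

# Annotate later; accountId may be supplied here if it was not known at
# assessment time.
recaptcha.projects().assessments().annotate(
    name=assessment["name"],
    body={"annotation": "LEGITIMATE", "accountId": "stable-account-id-123"},
).execute()
```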
"rootUrl": "https://redis.googleapis.com/", "schemas": { "CertChain": { diff --git a/googleapiclient/discovery_cache/documents/redis.v1beta1.json b/googleapiclient/discovery_cache/documents/redis.v1beta1.json index 2248316af95..3f9c61074e8 100644 --- a/googleapiclient/discovery_cache/documents/redis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/redis.v1beta1.json @@ -821,7 +821,7 @@ } } }, - "revision": "20231017", + "revision": "20231019", "rootUrl": "https://redis.googleapis.com/", "schemas": { "CertChain": { diff --git a/googleapiclient/discovery_cache/documents/reseller.v1.json b/googleapiclient/discovery_cache/documents/reseller.v1.json index c6730ab3e13..f7e5c462744 100644 --- a/googleapiclient/discovery_cache/documents/reseller.v1.json +++ b/googleapiclient/discovery_cache/documents/reseller.v1.json @@ -651,7 +651,7 @@ } } }, - "revision": "20231022", + "revision": "20231024", "rootUrl": "https://reseller.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json index b7f2c214588..b76323631c2 100644 --- a/googleapiclient/discovery_cache/documents/resourcesettings.v1.json +++ b/googleapiclient/discovery_cache/documents/resourcesettings.v1.json @@ -499,7 +499,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://resourcesettings.googleapis.com/", "schemas": { "GoogleCloudResourcesettingsV1ListSettingsResponse": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json index fde753a087b..6a080b6d155 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2.json +++ b/googleapiclient/discovery_cache/documents/retail.v2.json @@ -2059,7 +2059,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json index bdc6bcac1ef..955220ee24b 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json @@ -2388,7 +2388,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json index 3f49d8ef916..e84c41fd6c1 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2beta.json +++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json @@ -2087,7 +2087,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { diff --git a/googleapiclient/discovery_cache/documents/run.v1.json b/googleapiclient/discovery_cache/documents/run.v1.json index c4b2e4a1e7b..dadcf6634aa 100644 --- a/googleapiclient/discovery_cache/documents/run.v1.json +++ b/googleapiclient/discovery_cache/documents/run.v1.json @@ -2289,7 +2289,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://run.googleapis.com/", "schemas": { "Addressable": { @@ -3590,7 +3590,7 @@ "additionalProperties": { "type": "string" }, - "description": "Unstructured key value map stored with a resource that may be set by external tools to store and retrieve 
arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution.", + "description": "Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision. * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/disable-default-url`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service (ALPHA) * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. 
* `run.googleapis.com/vpc-access-egress`: Revision, Execution.", "type": "object" }, "clusterName": { diff --git a/googleapiclient/discovery_cache/documents/run.v2.json b/googleapiclient/discovery_cache/documents/run.v2.json index 8a6c25cba50..046af5165b4 100644 --- a/googleapiclient/discovery_cache/documents/run.v2.json +++ b/googleapiclient/discovery_cache/documents/run.v2.json @@ -1115,7 +1115,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -2661,6 +2661,10 @@ "readOnly": true, "type": "boolean" }, + "scaling": { + "$ref": "GoogleCloudRunV2ServiceScaling", + "description": "Optional. Specifies service-level scaling settings" + }, "template": { "$ref": "GoogleCloudRunV2RevisionTemplate", "description": "Required. The template used to create revisions for this Service." @@ -2704,6 +2708,18 @@ }, "type": "object" }, + "GoogleCloudRunV2ServiceScaling": { + "description": "Scaling settings that apply to the service as a whole rather than the individual revision.", + "id": "GoogleCloudRunV2ServiceScaling", + "properties": { + "minInstanceCount": { + "description": "total min instances for the service. This number of instances will be divide among all revisions with specified traffic based on the percent of traffic they are receiving. (ALPHA)", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "GoogleCloudRunV2TCPSocketAction": { "description": "TCPSocketAction describes an action based on opening a socket", "id": "GoogleCloudRunV2TCPSocketAction", diff --git a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json index 03ac9ef907f..fc37f54af61 100644 --- a/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json +++ b/googleapiclient/discovery_cache/documents/runtimeconfig.v1.json @@ -210,7 +210,7 @@ } } }, - "revision": "20231016", + "revision": "20231023", "rootUrl": "https://runtimeconfig.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json index 1e2106c6021..3daecbac1e6 100644 --- a/googleapiclient/discovery_cache/documents/safebrowsing.v4.json +++ b/googleapiclient/discovery_cache/documents/safebrowsing.v4.json @@ -261,7 +261,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://safebrowsing.googleapis.com/", "schemas": { "GoogleProtobufEmpty": { diff --git a/googleapiclient/discovery_cache/documents/safebrowsing.v5.json b/googleapiclient/discovery_cache/documents/safebrowsing.v5.json index 3f2f8583aea..4b88e79adcb 100644 --- a/googleapiclient/discovery_cache/documents/safebrowsing.v5.json +++ b/googleapiclient/discovery_cache/documents/safebrowsing.v5.json @@ -121,7 +121,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://safebrowsing.googleapis.com/", "schemas": { "GoogleSecuritySafebrowsingV5FullHash": { diff --git a/googleapiclient/discovery_cache/documents/searchconsole.v1.json b/googleapiclient/discovery_cache/documents/searchconsole.v1.json index fe57d83a9d3..f55c2bc1576 100644 --- a/googleapiclient/discovery_cache/documents/searchconsole.v1.json +++ b/googleapiclient/discovery_cache/documents/searchconsole.v1.json @@ -400,7 +400,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://searchconsole.googleapis.com/", "schemas": { 
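For reference: a minimal read-modify-write sketch of the new service-level `scaling.minInstanceCount` field (ALPHA) added to run v2 above, assuming the standard `services.get`/`services.patch` methods. The service name is a placeholder; Application Default Credentials are assumed.

```python
from googleapiclient.discovery import build

run = build("run", "v2")  # Application Default Credentials
service_name = "projects/my-project/locations/us-central1/services/my-service"

# Read the current service, set the new service-level scaling field, and
# write it back; the minimum is divided among revisions by traffic split.
svc = run.projects().locations().services().get(name=service_name).execute()
svc["scaling"] = {"minInstanceCount": 2}
run.projects().locations().services().patch(name=service_name, body=svc).execute()
```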
"AmpInspectionResult": { diff --git a/googleapiclient/discovery_cache/documents/secretmanager.v1.json b/googleapiclient/discovery_cache/documents/secretmanager.v1.json index 09c161d7900..ed5f32608fb 100644 --- a/googleapiclient/discovery_cache/documents/secretmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/secretmanager.v1.json @@ -643,7 +643,7 @@ } } }, - "revision": "20231006", + "revision": "20231025", "rootUrl": "https://secretmanager.googleapis.com/", "schemas": { "AccessSecretVersionResponse": { diff --git a/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json b/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json index 00ceed01519..d9423cb6aa2 100644 --- a/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/secretmanager.v1beta1.json @@ -628,7 +628,7 @@ } } }, - "revision": "20231006", + "revision": "20231025", "rootUrl": "https://secretmanager.googleapis.com/", "schemas": { "AccessSecretVersionResponse": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json index ab0414e19bf..d0d9c807b22 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json @@ -5110,7 +5110,7 @@ } } }, - "revision": "20231023", + "revision": "20231026", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json index 52e21def2de..01015758d2a 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json @@ -896,7 +896,7 @@ } } }, - "revision": "20231023", + "revision": "20231026", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json index 117ca420220..c2174435019 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json @@ -1906,7 +1906,7 @@ } } }, - "revision": "20231023", + "revision": "20231026", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json index bd7a2d42da8..51f79fcca26 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1.json @@ -542,7 +542,7 @@ } } }, - "revision": "20231018", + "revision": "20231029", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { diff --git a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json index a26b5d6038c..890bf90bbe6 100644 --- a/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceconsumermanagement.v1beta1.json @@ -500,7 +500,7 @@ } } }, - "revision": "20231018", + "revision": "20231029", 
"rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "Api": { diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json index 91f66a87bda..5f02fdafbb7 100644 --- a/googleapiclient/discovery_cache/documents/servicecontrol.v1.json +++ b/googleapiclient/discovery_cache/documents/servicecontrol.v1.json @@ -197,7 +197,7 @@ } } }, - "revision": "20231016", + "revision": "20231019", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "AllocateInfo": { diff --git a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json index 8b9d6792cc2..df8c45b9d99 100644 --- a/googleapiclient/discovery_cache/documents/servicecontrol.v2.json +++ b/googleapiclient/discovery_cache/documents/servicecontrol.v2.json @@ -169,7 +169,7 @@ } } }, - "revision": "20231016", + "revision": "20231030", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "Api": { diff --git a/googleapiclient/discovery_cache/documents/servicedirectory.v1.json b/googleapiclient/discovery_cache/documents/servicedirectory.v1.json index ef003f8735a..da3fb94633b 100644 --- a/googleapiclient/discovery_cache/documents/servicedirectory.v1.json +++ b/googleapiclient/discovery_cache/documents/servicedirectory.v1.json @@ -883,7 +883,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://servicedirectory.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json b/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json index a52b0f17478..e3c54eb5433 100644 --- a/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/servicedirectory.v1beta1.json @@ -971,7 +971,7 @@ } } }, - "revision": "20231015", + "revision": "20231022", "rootUrl": "https://servicedirectory.googleapis.com/", "schemas": { "Binding": { diff --git a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json index 9bcff4e886f..b6158678748 100644 --- a/googleapiclient/discovery_cache/documents/servicemanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/servicemanagement.v1.json @@ -830,7 +830,7 @@ } } }, - "revision": "20231013", + "revision": "20231020", "rootUrl": "https://servicemanagement.googleapis.com/", "schemas": { "Advice": { diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json index 5ea474bcc3d..35b365d8164 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1.json @@ -789,6 +789,32 @@ "https://www.googleapis.com/auth/service.management" ] }, + "getVpcServiceControls": { + "description": "Consumers use this method to find out the state of VPC Service Controls. The controls could be enabled or disabled for a connection.", + "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}/vpcServiceControls", + "httpMethod": "GET", + "id": "servicenetworking.services.projects.global.networks.getVpcServiceControls", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
Name of the VPC Service Controls config to retrieve in the format: `services/{service}/projects/{project}/global/networks/{network}`. {service} is the peering service that is managing connectivity for the service producer's organization. For Google services that support this functionality, this value is `servicenetworking.googleapis.com`. {project} is a project number e.g. `12345` that contains the service consumer's VPC network. {network} is the name of the service consumer's VPC network.", + "location": "path", + "pattern": "^services/[^/]+/projects/[^/]+/global/networks/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}/vpcServiceControls", + "response": { + "$ref": "VpcServiceControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/service.management" + ] + }, "updateConsumerConfig": { "description": "Service producers use this method to update the configuration of their connection including the import/export of custom routes and subnetwork routes with public IP.", "flatPath": "v1/services/{servicesId}/projects/{projectsId}/global/networks/{networksId}:updateConsumerConfig", @@ -1003,7 +1029,7 @@ } } }, - "revision": "20231004", + "revision": "20231029", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -1138,6 +1164,10 @@ "description": "Optional. Description of the subnet.", "type": "string" }, + "internalRange": { + "description": "Optional. The url of an Internal Range. Eg: `projects//locations/global/internalRanges/`. If specified, it means that the subnetwork cidr will be created using the combination of requested_address/ip_prefix_length. Note that the subnet cidr has to be within the cidr range of this Internal Range.", + "type": "string" + }, "ipPrefixLength": { "description": "Required. The prefix length of the subnet's IP address range. Use CIDR range notation, such as `29` to provision a subnet with an `x.x.x.x/29` CIDR range. The IP address range is drawn from a pool of available ranges in the service consumer's allocated range. GCE disallows subnets with prefix_length > 29", "format": "int32", @@ -4048,6 +4078,18 @@ } }, "type": "object" + }, + "VpcServiceControls": { + "description": "Response for the get VPC Service Controls request.", + "id": "VpcServiceControls", + "properties": { + "enabled": { + "description": "Output only. Indicates whether the VPC Service Controls are enabled or disabled for the connection. If the consumer called the EnableVpcServiceControls method, then this is true. If the consumer called DisableVpcServiceControls, then this is false. The default is false.", + "readOnly": true, + "type": "boolean" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json index b51c069a9cf..7e29dd7c7a6 100644 --- a/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json +++ b/googleapiclient/discovery_cache/documents/servicenetworking.v1beta.json @@ -307,7 +307,7 @@ } } }, - "revision": "20231004", + "revision": "20231029", "rootUrl": "https://servicenetworking.googleapis.com/", "schemas": { "AddDnsRecordSetMetadata": { @@ -2896,6 +2896,18 @@ } }, "type": "object" + }, + "VpcServiceControls": { + "description": "Response for the get VPC Service Controls request.", + "id": "VpcServiceControls", + "properties": { + "enabled": { + "description": "Output only. 
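For reference: a minimal sketch of the new `getVpcServiceControls` call defined above. The peering service, project number, and network follow the documented name format but are placeholders; note that the reserved word `global` surfaces as `global_()` in this client.

```python
from googleapiclient.discovery import build

sn = build("servicenetworking", "v1")  # Application Default Credentials
name = (
    "services/servicenetworking.googleapis.com"
    "/projects/12345/global/networks/default"  # placeholder project number and network
)

resp = sn.services().projects().global_().networks().getVpcServiceControls(
    name=name
).execute()
print("VPC Service Controls enabled:", resp.get("enabled", False))
```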
Indicates whether the VPC Service Controls are enabled or disabled for the connection. If the consumer called the EnableVpcServiceControls method, then this is true. If the consumer called DisableVpcServiceControls, then this is false. The default is false.", + "readOnly": true, + "type": "boolean" + } + }, + "type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1.json index 8d92538d46a..cc4b6efdb7b 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1.json @@ -426,7 +426,7 @@ } } }, - "revision": "20231018", + "revision": "20231029", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AddEnableRulesMetadata": { diff --git a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json index eeba79a5e5a..41e2675b930 100644 --- a/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/serviceusage.v1beta1.json @@ -964,7 +964,7 @@ } } }, - "revision": "20231018", + "revision": "20231029", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "AddEnableRulesMetadata": { diff --git a/googleapiclient/discovery_cache/documents/slides.v1.json b/googleapiclient/discovery_cache/documents/slides.v1.json index 5b62e6834a6..820efa37504 100644 --- a/googleapiclient/discovery_cache/documents/slides.v1.json +++ b/googleapiclient/discovery_cache/documents/slides.v1.json @@ -313,7 +313,7 @@ } } }, - "revision": "20231017", + "revision": "20231024", "rootUrl": "https://slides.googleapis.com/", "schemas": { "AffineTransform": { diff --git a/googleapiclient/discovery_cache/documents/spanner.v1.json b/googleapiclient/discovery_cache/documents/spanner.v1.json index b346f54bc09..9eb412c54b0 100644 --- a/googleapiclient/discovery_cache/documents/spanner.v1.json +++ b/googleapiclient/discovery_cache/documents/spanner.v1.json @@ -2604,6 +2604,65 @@ "revision": "20231017", "rootUrl": "https://spanner.googleapis.com/", "schemas": { + "AutoscalingConfig": { + "description": "Autoscaling config for an instance.", + "id": "AutoscalingConfig", + "properties": { + "autoscalingLimits": { + "$ref": "AutoscalingLimits", + "description": "Required. Autoscaling limits for an instance." + }, + "autoscalingTargets": { + "$ref": "AutoscalingTargets", + "description": "Required. The autoscaling targets for an instance." + } + }, + "type": "object" + }, + "AutoscalingLimits": { + "description": "The autoscaling limits for the instance. Users can define the minimum and maximum compute capacity allocated to the instance, and the autoscaler will only scale within that range. Users can either use nodes or processing units to specify the limits, but should use the same unit to set both the min_limit and max_limit.", + "id": "AutoscalingLimits", + "properties": { + "maxNodes": { + "description": "Maximum number of nodes allocated to the instance. If set, this number should be greater than or equal to min_nodes.", + "format": "int32", + "type": "integer" + }, + "maxProcessingUnits": { + "description": "Maximum number of processing units allocated to the instance. 
If set, this number should be multiples of 1000 and be greater than or equal to min_processing_units.", + "format": "int32", + "type": "integer" + }, + "minNodes": { + "description": "Minimum number of nodes allocated to the instance. If set, this number should be greater than or equal to 1.", + "format": "int32", + "type": "integer" + }, + "minProcessingUnits": { + "description": "Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "AutoscalingTargets": { + "description": "The autoscaling targets for an instance.", + "id": "AutoscalingTargets", + "properties": { + "highPriorityCpuUtilizationPercent": { + "description": "Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive.", + "format": "int32", + "type": "integer" + }, + "storageUtilizationPercent": { + "description": "Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 100] inclusive.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "Backup": { "description": "A backup of a Cloud Spanner database.", "id": "Backup", @@ -3717,6 +3776,10 @@ "description": "An isolated set of Cloud Spanner resources on which databases can be hosted.", "id": "Instance", "properties": { + "autoscalingConfig": { + "$ref": "AutoscalingConfig", + "description": "Optional. The autoscaling configuration. Autoscaling is enabled if this field is set. When autoscaling is enabled, node_count and processing_units are treated as OUTPUT_ONLY fields and reflect the current compute capacity allocated to the instance." + }, "config": { "description": "Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. 
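
The new `AutoscalingConfig`, `AutoscalingLimits`, and `AutoscalingTargets` messages attach to `Instance.autoscalingConfig`. A hedged sketch of supplying them when creating an instance with the generated client; the project, instance ID, and instance config are placeholders:

```python
from googleapiclient import discovery

spanner = discovery.build("spanner", "v1")

body = {
    "instanceId": "autoscaled-instance",  # placeholder
    "instance": {
        "config": "projects/my-project/instanceConfigs/regional-us-central1",
        "displayName": "Autoscaled instance",
        # New in this revision: when autoscalingConfig is set, nodeCount and
        # processingUnits become output-only.
        "autoscalingConfig": {
            "autoscalingLimits": {"minNodes": 1, "maxNodes": 3},
            "autoscalingTargets": {
                "highPriorityCpuUtilizationPercent": 65,
                "storageUtilizationPercent": 90,
            },
        },
    },
}

operation = (
    spanner.projects()
    .instances()
    .create(parent="projects/my-project", body=body)
    .execute()
)
```
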
See also InstanceConfig and ListInstanceConfigs.", "type": "string" diff --git a/googleapiclient/discovery_cache/documents/speech.v1.json b/googleapiclient/discovery_cache/documents/speech.v1.json index 1bf51173ac7..a6f34dcf7e5 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1.json @@ -524,7 +524,7 @@ } } }, - "revision": "20231012", + "revision": "20231024", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ABNFGrammar": { diff --git a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json index a7b7d9bbcf9..9defaa7b784 100644 --- a/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/speech.v1p1beta1.json @@ -524,7 +524,7 @@ } } }, - "revision": "20231012", + "revision": "20231024", "rootUrl": "https://speech.googleapis.com/", "schemas": { "ABNFGrammar": { diff --git a/googleapiclient/discovery_cache/documents/sqladmin.v1.json b/googleapiclient/discovery_cache/documents/sqladmin.v1.json index f88b2c61c23..d4b4f83dad9 100644 --- a/googleapiclient/discovery_cache/documents/sqladmin.v1.json +++ b/googleapiclient/discovery_cache/documents/sqladmin.v1.json @@ -2165,7 +2165,7 @@ } } }, - "revision": "20231011", + "revision": "20231017", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -3789,12 +3789,12 @@ "type": "boolean" }, "stopAt": { - "description": "Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only", + "description": "Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only.", "format": "google-datetime", "type": "string" }, "stopAtMark": { - "description": "Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only", + "description": "Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only.", "type": "string" }, "striped": { @@ -4101,11 +4101,11 @@ "description": "PSC settings for this instance." }, "requireSsl": { - "description": "LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix)", + "description": "Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. 
If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag.", "type": "boolean" }, "sslMode": { - "description": "Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means \"only accepts SSL connection\", while the `require_ssl=false` means \"both non-SSL and SSL connections are allowed\". The database will respect `ssl_mode` in this case and only accept SSL connections.", + "description": "Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means \"only accepts SSL connection\", while the `require_ssl=false` means \"both non-SSL and SSL connections are allowed\". The database respects `ssl_mode` in this case and only accepts SSL connections.", "enum": [ "SSL_MODE_UNSPECIFIED", "ALLOW_UNENCRYPTED_AND_ENCRYPTED", @@ -4113,10 +4113,10 @@ "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" ], "enumDescriptions": [ - "SSL mode is unknown.", - "Allow non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. When this value is used, legacy `require_ssl` flag must be false or unset to avoid the conflict between values of two flags.", - "Only allow connections encrypted with SSL/TLS. When this value is used, legacy `require_ssl` flag must be false or unset to avoid the conflict between values of two flags.", - "Only allow connections encrypted with SSL/TLS and with valid client certificates. When this value is used, legacy `require_ssl` flag must be true or unset to avoid the conflict between values of two flags." + "The SSL mode is unknown.", + "Allow non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. When this value is used, the legacy `require_ssl` flag must be false or cleared to avoid the conflict between values of two flags.", + "Only allow connections encrypted with SSL/TLS. When this value is used, the legacy `require_ssl` flag must be false or cleared to avoid the conflict between values of two flags.", + "Only allow connections encrypted with SSL/TLS and with valid client certificates. 
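
For the rewritten `ssl_mode`/`require_ssl` guidance, a sketch of setting one of the documented valid pairs on a PostgreSQL instance via `instances.patch`; the project and instance names are placeholders:

```python
from googleapiclient import discovery

sqladmin = discovery.build("sqladmin", "v1")

# One of the valid pairs from the description: ENCRYPTED_ONLY with require_ssl=false.
body = {
    "settings": {
        "ipConfiguration": {
            "sslMode": "ENCRYPTED_ONLY",
            "requireSsl": False,
        }
    }
}

operation = (
    sqladmin.instances()
    .patch(project="my-project", instance="my-postgres-instance", body=body)
    .execute()
)
```
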
When this value is used, the legacy `require_ssl` flag must be true or cleared to avoid the conflict between values of two flags." ], "type": "string" } @@ -4669,10 +4669,6 @@ ], "type": "string" }, - "disallowCompromisedCredentials": { - "description": "Disallow credentials that have been previously compromised by a public data breach.", - "type": "boolean" - }, "disallowUsernameSubstring": { "description": "Disallow username as a part of the password.", "type": "boolean" @@ -5698,12 +5694,18 @@ "enum": [ "BUILT_IN", "CLOUD_IAM_USER", - "CLOUD_IAM_SERVICE_ACCOUNT" + "CLOUD_IAM_SERVICE_ACCOUNT", + "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER", + "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" ], "enumDescriptions": [ "The database's built-in user type.", "Cloud IAM user.", - "Cloud IAM service account." + "Cloud IAM service account.", + "Cloud IAM Group non-login user.", + "Cloud IAM Group login user.", + "Cloud IAM Group login service account." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json b/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json index bc860b41d29..665d895c06f 100644 --- a/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json +++ b/googleapiclient/discovery_cache/documents/sqladmin.v1beta4.json @@ -2165,7 +2165,7 @@ } } }, - "revision": "20231011", + "revision": "20231017", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -3789,12 +3789,12 @@ "type": "boolean" }, "stopAt": { - "description": "Optional. StopAt keyword for transaction log import, Applies to Cloud SQL for SQL Server only", + "description": "Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only.", "format": "google-datetime", "type": "string" }, "stopAtMark": { - "description": "Optional. StopAtMark keyword for transaction log import, Applies to Cloud SQL for SQL Server only", + "description": "Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only.", "type": "string" }, "striped": { @@ -4101,11 +4101,11 @@ "description": "PSC settings for this instance." }, "requireSsl": { - "description": "LINT.IfChange(require_ssl_deprecate) Whether SSL/TLS connections over IP are enforced or not. If set to false, allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. If set to true, only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. LINT.ThenChange(//depot/google3/java/com/google/storage/speckle/boss/admin/actions/InstanceUpdateAction.java:update_api_temp_fix)", + "description": "Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. 
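
The `User.type` enum gains the `CLOUD_IAM_GROUP*` values; a sketch of creating a Cloud IAM group database user with `users.insert`, where the group email, project, and instance are placeholders:

```python
from googleapiclient import discovery

sqladmin = discovery.build("sqladmin", "v1")

body = {
    "name": "db-readers@example.com",  # placeholder IAM group email
    "type": "CLOUD_IAM_GROUP",         # new enum value in this revision
}

operation = (
    sqladmin.users()
    .insert(project="my-project", instance="my-mysql-instance", body=body)
    .execute()
)
```
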
If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag.", "type": "boolean" }, "sslMode": { - "description": "Specify how SSL/TLS will be enforced in database connections. This flag is only supported for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, it is recommended to use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED, require_ssl=false; ssl_mode=ENCRYPTED_ONLY, require_ssl=false; ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED, require_ssl=true; Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means \"only accepts SSL connection\", while the `require_ssl=false` means \"both non-SSL and SSL connections are allowed\". The database will respect `ssl_mode` in this case and only accept SSL connections.", + "description": "Specify how SSL/TLS is enforced in database connections. This flag is supported only for PostgreSQL. Use the legacy `require_ssl` flag for enforcing SSL/TLS in MySQL and SQL Server. But, for PostgreSQL, use the `ssl_mode` flag instead of the legacy `require_ssl` flag. To avoid the conflict between those flags in PostgreSQL, only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` Note that the value of `ssl_mode` gets priority over the value of the legacy `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY, require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means \"only accepts SSL connection\", while the `require_ssl=false` means \"both non-SSL and SSL connections are allowed\". The database respects `ssl_mode` in this case and only accepts SSL connections.", "enum": [ "SSL_MODE_UNSPECIFIED", "ALLOW_UNENCRYPTED_AND_ENCRYPTED", @@ -4113,10 +4113,10 @@ "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" ], "enumDescriptions": [ - "SSL mode is unknown.", - "Allow non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate will not be verified. When this value is used, legacy `require_ssl` flag must be false or unset to avoid the conflict between values of two flags.", - "Only allow connections encrypted with SSL/TLS. When this value is used, legacy `require_ssl` flag must be false or unset to avoid the conflict between values of two flags.", - "Only allow connections encrypted with SSL/TLS and with valid client certificates. When this value is used, legacy `require_ssl` flag must be true or unset to avoid the conflict between values of two flags." + "The SSL mode is unknown.", + "Allow non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. When this value is used, the legacy `require_ssl` flag must be false or cleared to avoid the conflict between values of two flags.", + "Only allow connections encrypted with SSL/TLS. When this value is used, the legacy `require_ssl` flag must be false or cleared to avoid the conflict between values of two flags.", + "Only allow connections encrypted with SSL/TLS and with valid client certificates. 
When this value is used, the legacy `require_ssl` flag must be true or cleared to avoid the conflict between values of two flags." ], "type": "string" } @@ -4669,10 +4669,6 @@ ], "type": "string" }, - "disallowCompromisedCredentials": { - "description": "Disallow credentials that have been previously compromised by a public data breach.", - "type": "boolean" - }, "disallowUsernameSubstring": { "description": "Disallow username as a part of the password.", "type": "boolean" @@ -5696,12 +5692,18 @@ "enum": [ "BUILT_IN", "CLOUD_IAM_USER", - "CLOUD_IAM_SERVICE_ACCOUNT" + "CLOUD_IAM_SERVICE_ACCOUNT", + "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER", + "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" ], "enumDescriptions": [ "The database's built-in user type.", "Cloud IAM user.", - "Cloud IAM service account." + "Cloud IAM service account.", + "Cloud IAM Group non-login user.", + "Cloud IAM Group login user.", + "Cloud IAM Group service account." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/storage.v1.json b/googleapiclient/discovery_cache/documents/storage.v1.json index 566a3b973c9..04b22a4abc7 100644 --- a/googleapiclient/discovery_cache/documents/storage.v1.json +++ b/googleapiclient/discovery_cache/documents/storage.v1.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"38393830393831373134383332353836373131\"", + "etag": "\"31353735383930303834363437333933373131\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3563,7 +3563,7 @@ } } }, - "revision": "20231020", + "revision": "20231024", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { diff --git a/googleapiclient/discovery_cache/documents/storagetransfer.v1.json b/googleapiclient/discovery_cache/documents/storagetransfer.v1.json index e501add4184..d99e24b490a 100644 --- a/googleapiclient/discovery_cache/documents/storagetransfer.v1.json +++ b/googleapiclient/discovery_cache/documents/storagetransfer.v1.json @@ -632,7 +632,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AgentPool": { diff --git a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json index d2b788d2d3a..bf765080275 100644 --- a/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json +++ b/googleapiclient/discovery_cache/documents/streetviewpublish.v1.json @@ -534,7 +534,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://streetviewpublish.googleapis.com/", "schemas": { "BatchDeletePhotosRequest": { diff --git a/googleapiclient/discovery_cache/documents/sts.v1.json b/googleapiclient/discovery_cache/documents/sts.v1.json index 0c6a94e9ae8..8c950aae050 100644 --- a/googleapiclient/discovery_cache/documents/sts.v1.json +++ b/googleapiclient/discovery_cache/documents/sts.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231016", + "revision": "20231022", "rootUrl": "https://sts.googleapis.com/", "schemas": { "GoogleIamV1Binding": { diff --git a/googleapiclient/discovery_cache/documents/sts.v1beta.json b/googleapiclient/discovery_cache/documents/sts.v1beta.json index 46fded0e1b9..97ccdd3c004 100644 --- 
a/googleapiclient/discovery_cache/documents/sts.v1beta.json +++ b/googleapiclient/discovery_cache/documents/sts.v1beta.json @@ -116,7 +116,7 @@ } } }, - "revision": "20231016", + "revision": "20231022", "rootUrl": "https://sts.googleapis.com/", "schemas": { "GoogleIamV1Binding": { diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v1.json b/googleapiclient/discovery_cache/documents/tagmanager.v1.json index 678bd59ee61..da12a68dd5b 100644 --- a/googleapiclient/discovery_cache/documents/tagmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/tagmanager.v1.json @@ -1932,7 +1932,7 @@ } } }, - "revision": "20231018", + "revision": "20231027", "rootUrl": "https://tagmanager.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/tagmanager.v2.json b/googleapiclient/discovery_cache/documents/tagmanager.v2.json index fc0e28466a0..7de9f66763f 100644 --- a/googleapiclient/discovery_cache/documents/tagmanager.v2.json +++ b/googleapiclient/discovery_cache/documents/tagmanager.v2.json @@ -3890,7 +3890,7 @@ } } }, - "revision": "20231018", + "revision": "20231027", "rootUrl": "https://tagmanager.googleapis.com/", "schemas": { "Account": { diff --git a/googleapiclient/discovery_cache/documents/tasks.v1.json b/googleapiclient/discovery_cache/documents/tasks.v1.json index e577026ab8b..a9ab7d90521 100644 --- a/googleapiclient/discovery_cache/documents/tasks.v1.json +++ b/googleapiclient/discovery_cache/documents/tasks.v1.json @@ -566,7 +566,7 @@ } } }, - "revision": "20231021", + "revision": "20231029", "rootUrl": "https://tasks.googleapis.com/", "schemas": { "Task": { diff --git a/googleapiclient/discovery_cache/documents/testing.v1.json b/googleapiclient/discovery_cache/documents/testing.v1.json index b82e0814a05..6a004918591 100644 --- a/googleapiclient/discovery_cache/documents/testing.v1.json +++ b/googleapiclient/discovery_cache/documents/testing.v1.json @@ -442,7 +442,7 @@ } } }, - "revision": "20231017", + "revision": "20231023", "rootUrl": "https://testing.googleapis.com/", "schemas": { "Account": { @@ -1161,11 +1161,6 @@ "$ref": "AndroidDevice", "description": "Required. The requested device" }, - "androidDeviceList": { - "$ref": "AndroidDeviceList", - "deprecated": true, - "description": "Optional. The list of requested devices. At most two devices may be simultaneously requested." - }, "createTime": { "description": "Output only. The time that the Session was created.", "format": "google-datetime", @@ -1226,7 +1221,7 @@ "type": "array" }, "ttl": { - "description": "Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the ExtendDeviceSession RPC. Default: 30 minutes.", + "description": "Optional. The amount of time that a device will be initially allocated for. This can eventually be extended with the UpdateDeviceSession RPC. Default: 30 minutes.", "format": "google-duration", "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1.json index 7322c368789..6f6a0189071 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1.json @@ -318,7 +318,7 @@ } } }, - "revision": "20231013", + "revision": "20231023", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AudioConfig": { @@ -390,7 +390,8 @@ "type": "string" }, "reportedUsage": { - "description": "Optional. 
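
The `DeviceSession.ttl` description now points at the `UpdateDeviceSession` RPC for extensions. A rough sketch of requesting a session with an explicit initial TTL, assuming the device sessions resource lives under `projects.deviceSessions`; the project, device model, and version IDs are placeholders:

```python
from googleapiclient import discovery

testing = discovery.build("testing", "v1")

# Assumption: device sessions are exposed as projects.deviceSessions in this surface.
body = {
    "androidDevice": {
        "androidModelId": "oriole",   # placeholder model
        "androidVersionId": "33",     # placeholder OS version
        "locale": "en",
        "orientation": "portrait",
    },
    # Initial allocation; per the updated description it can later be extended
    # with the UpdateDeviceSession RPC.
    "ttl": "1800s",
}

session = (
    testing.projects()
    .deviceSessions()
    .create(parent="projects/my-project", body=body)
    .execute()
)
```
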
The usage of the synthesized audio to be reported.", + "deprecated": true, + "description": "Optional. Deprecated. The usage of the synthesized audio to be reported.", "enum": [ "REPORTED_USAGE_UNSPECIFIED", "REALTIME", diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json index a988d5e24d8..1610cb59677 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json @@ -261,7 +261,7 @@ } } }, - "revision": "20231013", + "revision": "20231023", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AudioConfig": { @@ -329,7 +329,8 @@ "type": "string" }, "reportedUsage": { - "description": "Optional. The usage of the synthesized audio to be reported.", + "deprecated": true, + "description": "Optional. Deprecated. The usage of the synthesized audio to be reported.", "enum": [ "REPORTED_USAGE_UNSPECIFIED", "REALTIME", diff --git a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json index 338ec8182bf..f458727927b 100644 --- a/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json +++ b/googleapiclient/discovery_cache/documents/toolresults.v1beta3.json @@ -1463,7 +1463,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://toolresults.googleapis.com/", "schemas": { "ANR": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v1.json b/googleapiclient/discovery_cache/documents/tpu.v1.json index 0f46b0e7cc8..365e30f4190 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v1.json +++ b/googleapiclient/discovery_cache/documents/tpu.v1.json @@ -659,7 +659,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorType": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json b/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json index 156e9fe3a48..4778f685480 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json @@ -669,7 +669,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorType": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v2.json b/googleapiclient/discovery_cache/documents/tpu.v2.json index 99bf0ada783..3d3cca44e98 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v2.json +++ b/googleapiclient/discovery_cache/documents/tpu.v2.json @@ -721,7 +721,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json b/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json index 6638f3ec8d6..fa2d463c17e 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json +++ b/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json @@ -925,7 +925,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/trafficdirector.v2.json b/googleapiclient/discovery_cache/documents/trafficdirector.v2.json index 54eb6f31448..54037fae115 100644 --- 
a/googleapiclient/discovery_cache/documents/trafficdirector.v2.json +++ b/googleapiclient/discovery_cache/documents/trafficdirector.v2.json @@ -128,7 +128,7 @@ } } }, - "revision": "20230927", + "revision": "20231025", "rootUrl": "https://trafficdirector.googleapis.com/", "schemas": { "Address": { diff --git a/googleapiclient/discovery_cache/documents/transcoder.v1.json b/googleapiclient/discovery_cache/documents/transcoder.v1.json index f072c1023de..7bd0ed679f0 100644 --- a/googleapiclient/discovery_cache/documents/transcoder.v1.json +++ b/googleapiclient/discovery_cache/documents/transcoder.v1.json @@ -385,7 +385,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://transcoder.googleapis.com/", "schemas": { "AdBreak": { diff --git a/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json b/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json index d4d47c3863b..6bcaced8702 100644 --- a/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json +++ b/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json @@ -116,7 +116,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://travelimpactmodel.googleapis.com/", "schemas": { "ComputeFlightEmissionsRequest": { diff --git a/googleapiclient/discovery_cache/documents/verifiedaccess.v1.json b/googleapiclient/discovery_cache/documents/verifiedaccess.v1.json index fb3d5bdc9ec..c7283fafaa2 100644 --- a/googleapiclient/discovery_cache/documents/verifiedaccess.v1.json +++ b/googleapiclient/discovery_cache/documents/verifiedaccess.v1.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231003", + "revision": "20231024", "rootUrl": "https://verifiedaccess.googleapis.com/", "schemas": { "Challenge": { diff --git a/googleapiclient/discovery_cache/documents/verifiedaccess.v2.json b/googleapiclient/discovery_cache/documents/verifiedaccess.v2.json index 46db4d996d2..05a0d79d6e5 100644 --- a/googleapiclient/discovery_cache/documents/verifiedaccess.v2.json +++ b/googleapiclient/discovery_cache/documents/verifiedaccess.v2.json @@ -146,7 +146,7 @@ } } }, - "revision": "20231003", + "revision": "20231024", "rootUrl": "https://verifiedaccess.googleapis.com/", "schemas": { "Challenge": { diff --git a/googleapiclient/discovery_cache/documents/versionhistory.v1.json b/googleapiclient/discovery_cache/documents/versionhistory.v1.json index 542cd018e82..ec852bddeb4 100644 --- a/googleapiclient/discovery_cache/documents/versionhistory.v1.json +++ b/googleapiclient/discovery_cache/documents/versionhistory.v1.json @@ -271,7 +271,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://versionhistory.googleapis.com/", "schemas": { "Channel": { diff --git a/googleapiclient/discovery_cache/documents/vision.v1.json b/googleapiclient/discovery_cache/documents/vision.v1.json index 42a9c789e75..d8df1899e22 100644 --- a/googleapiclient/discovery_cache/documents/vision.v1.json +++ b/googleapiclient/discovery_cache/documents/vision.v1.json @@ -1282,7 +1282,7 @@ } } }, - "revision": "20231006", + "revision": "20231020", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AddProductToProductSetRequest": { diff --git a/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json b/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json index 82393aa9cb0..4dcbd7de7b2 100644 --- a/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json +++ b/googleapiclient/discovery_cache/documents/vision.v1p1beta1.json @@ -449,7 +449,7 @@ } } }, - 
"revision": "20231006", + "revision": "20231020", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AnnotateFileResponse": { diff --git a/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json b/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json index 282fcd22ab6..7b6409ff690 100644 --- a/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json +++ b/googleapiclient/discovery_cache/documents/vision.v1p2beta1.json @@ -449,7 +449,7 @@ } } }, - "revision": "20231006", + "revision": "20231020", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AnnotateFileResponse": { diff --git a/googleapiclient/discovery_cache/documents/vmmigration.v1.json b/googleapiclient/discovery_cache/documents/vmmigration.v1.json index fc788973c51..ffd467d0e89 100644 --- a/googleapiclient/discovery_cache/documents/vmmigration.v1.json +++ b/googleapiclient/discovery_cache/documents/vmmigration.v1.json @@ -1972,7 +1972,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://vmmigration.googleapis.com/", "schemas": { "AccessKeyCredentials": { @@ -2618,6 +2618,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the boot disk." + }, "image": { "$ref": "DiskImageDefaults", "description": "The image to use when creating the disk." @@ -2872,6 +2876,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. Immutable. The encryption to apply to the VM disks." + }, "hostname": { "description": "The hostname to assign to the VM.", "type": "string" @@ -2998,6 +3006,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the VM disks." + }, "hostname": { "description": "The hostname to assign to the VM.", "type": "string" @@ -3444,6 +3456,10 @@ "$ref": "ComputeScheduling", "description": "Optional. Compute instance scheduling information (if empty default is used)." }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the VM." + }, "hostname": { "description": "Optional. The hostname to assign to the VM.", "type": "string" @@ -3517,6 +3533,17 @@ "properties": {}, "type": "object" }, + "Encryption": { + "description": "Encryption message describes the details of the applied encryption.", + "id": "Encryption", + "properties": { + "kmsKey": { + "description": "Required. The name of the encryption key that is stored in Google Cloud KMS.", + "type": "string" + } + }, + "type": "object" + }, "FetchInventoryResponse": { "description": "Response message for fetchInventory.", "id": "FetchInventoryResponse", @@ -4139,8 +4166,7 @@ "CLONE_ERROR", "CUTOVER_ERROR", "UTILIZATION_REPORT_ERROR", - "APPLIANCE_UPGRADE_ERROR", - "IMAGE_IMPORT_ERROR" + "APPLIANCE_UPGRADE_ERROR" ], "enumDescriptions": [ "Default value. This value is not used.", @@ -4152,8 +4178,7 @@ "Migrate to Virtual Machines encountered an error in clone operation.", "Migrate to Virtual Machines encountered an error in cutover operation.", "Migrate to Virtual Machines encountered an error during utilization report creation.", - "Migrate to Virtual Machines encountered an error during appliance upgrade.", - "Migrate to Virtual Machines encountered an error in image import operation." + "Migrate to Virtual Machines encountered an error during appliance upgrade." 
], "readOnly": true, "type": "string" @@ -4417,6 +4442,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the disk." + }, "sourceDiskNumber": { "description": "Required. The ordinal number of the source VM disk.", "format": "int32", @@ -4647,6 +4676,10 @@ "description": "User-provided description of the source.", "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. Immutable. The encryption details of the source data stored by the service." + }, "labels": { "additionalProperties": { "type": "string" diff --git a/googleapiclient/discovery_cache/documents/vmmigration.v1alpha1.json b/googleapiclient/discovery_cache/documents/vmmigration.v1alpha1.json index f1681aab573..ebf1d982dd9 100644 --- a/googleapiclient/discovery_cache/documents/vmmigration.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/vmmigration.v1alpha1.json @@ -1972,7 +1972,7 @@ } } }, - "revision": "20231012", + "revision": "20231019", "rootUrl": "https://vmmigration.googleapis.com/", "schemas": { "AccessKeyCredentials": { @@ -2618,6 +2618,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the boot disk." + }, "image": { "$ref": "DiskImageDefaults", "description": "The image to use when creating the disk." @@ -2884,6 +2888,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. Immutable. The encryption to apply to the VM disks." + }, "hostname": { "description": "The hostname to assign to the VM.", "type": "string" @@ -3010,6 +3018,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the VM disks." + }, "hostname": { "description": "The hostname to assign to the VM.", "type": "string" @@ -3479,6 +3491,10 @@ "$ref": "ComputeScheduling", "description": "Optional. Compute instance scheduling information (if empty default is used)." }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the VM." + }, "hostname": { "description": "Optional. The hostname to assign to the VM.", "type": "string" @@ -3552,6 +3568,17 @@ "properties": {}, "type": "object" }, + "Encryption": { + "description": "Encryption message describes the details of the applied encryption.", + "id": "Encryption", + "properties": { + "kmsKey": { + "description": "Required. The name of the encryption key that is stored in Google Cloud KMS.", + "type": "string" + } + }, + "type": "object" + }, "FetchInventoryResponse": { "description": "Response message for fetchInventory.", "id": "FetchInventoryResponse", @@ -4184,8 +4211,7 @@ "CLONE_ERROR", "CUTOVER_ERROR", "UTILIZATION_REPORT_ERROR", - "APPLIANCE_UPGRADE_ERROR", - "IMAGE_IMPORT_ERROR" + "APPLIANCE_UPGRADE_ERROR" ], "enumDescriptions": [ "Default value. This value is not used.", @@ -4197,8 +4223,7 @@ "Migrate to Virtual Machines encountered an error in clone operation.", "Migrate to Virtual Machines encountered an error in cutover operation.", "Migrate to Virtual Machines encountered an error during utilization report creation.", - "Migrate to Virtual Machines encountered an error during appliance upgrade.", - "Migrate to Virtual Machines encountered an error in image import operation." + "Migrate to Virtual Machines encountered an error during appliance upgrade." 
], "readOnly": true, "type": "string" @@ -4462,6 +4487,10 @@ ], "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. The encryption to apply to the disk." + }, "sourceDiskNumber": { "description": "Required. The ordinal number of the source VM disk.", "format": "int32", @@ -4698,6 +4727,10 @@ "description": "User-provided description of the source.", "type": "string" }, + "encryption": { + "$ref": "Encryption", + "description": "Optional. Immutable. The encryption details of the source data stored by the service." + }, "error": { "$ref": "Status", "deprecated": true, diff --git a/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json b/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json index 8cf460c2615..70daf2478cf 100644 --- a/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/vpcaccess.v1beta1.json @@ -384,7 +384,7 @@ } } }, - "revision": "20231005", + "revision": "20231016", "rootUrl": "https://vpcaccess.googleapis.com/", "schemas": { "Connector": { diff --git a/googleapiclient/discovery_cache/documents/webrisk.v1.json b/googleapiclient/discovery_cache/documents/webrisk.v1.json index 8912a28fc76..77a61fcd9b6 100644 --- a/googleapiclient/discovery_cache/documents/webrisk.v1.json +++ b/googleapiclient/discovery_cache/documents/webrisk.v1.json @@ -420,7 +420,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://webrisk.googleapis.com/", "schemas": { "GoogleCloudWebriskV1ComputeThreatListDiffResponse": { diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json index ac8b1895399..3b4cf741040 100644 --- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json +++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1.json @@ -526,7 +526,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://websecurityscanner.googleapis.com/", "schemas": { "Authentication": { @@ -1029,7 +1029,8 @@ "UNSUPPORTED_BLACKLIST_PATTERN_FORMAT", "UNSUPPORTED_FILTER", "UNSUPPORTED_FINDING_TYPE", - "UNSUPPORTED_URL_SCHEME" + "UNSUPPORTED_URL_SCHEME", + "CLOUD_ASSET_INVENTORY_ASSET_NOT_FOUND" ], "enumDescriptions": [ "There is no error.", @@ -1073,7 +1074,8 @@ "One or more blacklist patterns were in the wrong format.", "The supplied filter is not supported.", "The supplied finding type is not supported. For example, we do not provide findings of the given finding type.", - "The URL scheme of one or more of the supplied URLs is not supported." + "The URL scheme of one or more of the supplied URLs is not supported.", + "CAI is not able to list assets." 
], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json index 335be03664c..0cdf8de2420 100644 --- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1alpha.json @@ -526,7 +526,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://websecurityscanner.googleapis.com/", "schemas": { "Authentication": { @@ -618,7 +618,10 @@ "XSS_ANGULAR_CALLBACK", "INVALID_HEADER", "MISSPELLED_SECURITY_HEADER_NAME", - "MISMATCHING_SECURITY_HEADER_VALUES" + "MISMATCHING_SECURITY_HEADER_VALUES", + "ACCESSIBLE_GIT_REPOSITORY", + "ACCESSIBLE_SVN_REPOSITORY", + "ACCESSIBLE_ENV_FILE" ], "enumDescriptions": [ "The invalid finding type.", @@ -632,7 +635,10 @@ "A cross-site scripting (XSS) vulnerability in AngularJS module that occurs when a user-provided string is interpolated by Angular.", "A malformed or invalid valued header.", "Misspelled security header name.", - "Mismatching values in a duplicate security header." + "Mismatching values in a duplicate security header.", + "A world-readable git repository that potentially leaks source code, commit history or sensitive information such as credentials.", + "A world-readable subversion repository that potentially leaks source code, commit history or sensitive information such as credentials.", + "A world-readable env file that potentially leaks source code, commit history or sensitive information such as credentials." ], "type": "string" }, @@ -706,7 +712,10 @@ "XSS_ANGULAR_CALLBACK", "INVALID_HEADER", "MISSPELLED_SECURITY_HEADER_NAME", - "MISMATCHING_SECURITY_HEADER_VALUES" + "MISMATCHING_SECURITY_HEADER_VALUES", + "ACCESSIBLE_GIT_REPOSITORY", + "ACCESSIBLE_SVN_REPOSITORY", + "ACCESSIBLE_ENV_FILE" ], "enumDescriptions": [ "The invalid finding type.", @@ -720,7 +729,10 @@ "A cross-site scripting (XSS) vulnerability in AngularJS module that occurs when a user-provided string is interpolated by Angular.", "A malformed or invalid valued header.", "Misspelled security header name.", - "Mismatching values in a duplicate security header." + "Mismatching values in a duplicate security header.", + "A world-readable git repository that potentially leaks source code, commit history or sensitive information such as credentials.", + "A world-readable subversion repository that potentially leaks source code, commit history or sensitive information such as credentials.", + "A world-readable env file that potentially leaks source code, commit history or sensitive information such as credentials." 
], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json b/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json index 77dc2e6e4ee..8a56d1f0682 100644 --- a/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json +++ b/googleapiclient/discovery_cache/documents/websecurityscanner.v1beta.json @@ -526,7 +526,7 @@ } } }, - "revision": "20231015", + "revision": "20231021", "rootUrl": "https://websecurityscanner.googleapis.com/", "schemas": { "Authentication": { @@ -1052,7 +1052,8 @@ "UNSUPPORTED_BLACKLIST_PATTERN_FORMAT", "UNSUPPORTED_FILTER", "UNSUPPORTED_FINDING_TYPE", - "UNSUPPORTED_URL_SCHEME" + "UNSUPPORTED_URL_SCHEME", + "CLOUD_ASSET_INVENTORY_ASSET_NOT_FOUND" ], "enumDescriptions": [ "There is no error.", @@ -1096,7 +1097,8 @@ "One or more blacklist patterns were in the wrong format.", "The supplied filter is not supported.", "The supplied finding type is not supported. For example, we do not provide findings of the given finding type.", - "The URL scheme of one or more of the supplied URLs is not supported." + "The URL scheme of one or more of the supplied URLs is not supported.", + "CAI is not able to list assets." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json index 51100252dd3..508451fbd19 100644 --- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json +++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1.json @@ -366,6 +366,87 @@ ] } } + }, + "stepEntries": { + "methods": { + "get": { + "description": "Gets a step entry.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflows/{workflowsId}/executions/{executionsId}/stepEntries/{stepEntriesId}", + "httpMethod": "GET", + "id": "workflowexecutions.projects.locations.workflows.executions.stepEntries.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the step entry to retrieve. Format: projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/{step_entry}", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workflows/[^/]+/executions/[^/]+/stepEntries/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "StepEntry" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists step entries for the corresponding workflow execution. Returned entries are ordered by their create_time.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/workflows/{workflowsId}/executions/{executionsId}/stepEntries", + "httpMethod": "GET", + "id": "workflowexecutions.projects.locations.workflows.executions.stepEntries.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Filters applied to the `[StepEntries.ListStepEntries]` results. The following fields are supported for filtering: `entryId`, `createTime`, `updateTime`, `routine`, `step`, `stepType`, `state`. For details, see AIP-160. For example, if you are using the Google APIs Explorer: `state=\"SUCCEEDED\"` or `createTime>\"2023-08-01\" AND state=\"FAILED\"`", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Optional. Comma-separated list of fields that specify the ordering applied to the `[StepEntries.ListStepEntries]` results. 
By default the ordering is based on ascending `entryId`. The following fields are supported for ordering: `entryId`, `createTime`, `updateTime`, `routine`, `step`, `stepType`, `state`. For details, see AIP-132.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. Number of step entries to return per call. The default max is 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A page token, received from a previous `ListStepEntries` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListStepEntries` must match the call that provided the page token.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. Name of the workflow execution to list entries for. Format: projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/workflows/[^/]+/executions/[^/]+$", + "required": true, + "type": "string" + }, + "skip": { + "description": "Optional. The number of step entries to skip. It can be used with or without a pageToken. If used with a pageToken, then it indicates the number of step entries to skip starting from the requested page.", + "format": "int32", + "location": "query", + "type": "integer" + } + }, + "path": "v1/{+parent}/stepEntries", + "response": { + "$ref": "ListStepEntriesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } } } } @@ -376,7 +457,7 @@ } } }, - "revision": "20231016", + "revision": "20231017", "rootUrl": "https://workflowexecutions.googleapis.com/", "schemas": { "Callback": { @@ -435,6 +516,17 @@ }, "type": "object" }, + "Exception": { + "description": "Exception describes why the step entry failed.", + "id": "Exception", + "properties": { + "payload": { + "description": "Error message represented as a JSON string.", + "type": "string" + } + }, + "type": "object" + }, "Execution": { "description": "A running instance of a [Workflow](/workflows/docs/reference/rest/v1/projects.locations.workflows).", "id": "Execution", @@ -587,6 +679,59 @@ }, "type": "object" }, + "ListStepEntriesResponse": { + "description": "Response message for ExecutionHistory.ListStepEntries.", + "id": "ListStepEntriesResponse", + "properties": { + "nextPageToken": { + "description": "A token to retrieve next page of results. Pass this value in the ListStepEntriesRequest.page_token field in the subsequent call to `ListStepEntries` method to retrieve the next page of results.", + "type": "string" + }, + "stepEntries": { + "description": "The list of entries.", + "items": { + "$ref": "StepEntry" + }, + "type": "array" + }, + "totalSize": { + "description": "Indicates the total number of StepEntries that matched the request filter. For running executions, this number shows the number of StepEntries that are executed thus far.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "NavigationInfo": { + "description": "NavigationInfo describes what steps if any come before or after this step, or what steps are parents or children of this step.", + "id": "NavigationInfo", + "properties": { + "children": { + "description": "Step entries that can be reached by \"stepping into\" e.g. 
a subworkflow call.", + "items": { + "format": "int64", + "type": "string" + }, + "type": "array" + }, + "next": { + "description": "The index of the next step in the current workflow, if any.", + "format": "int64", + "type": "string" + }, + "parent": { + "description": "The step entry, if any, that can be reached by \"stepping out\" of the current workflow being executed.", + "format": "int64", + "type": "string" + }, + "previous": { + "description": "The index of the previous step in the current workflow, if any.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "Position": { "description": "Position contains source position information about the stack trace element such as line number, column number and length of the code block in bytes.", "id": "Position", @@ -726,6 +871,163 @@ }, "type": "object" }, + "StepEntry": { + "description": "An StepEntry contains debugging information for a step transition in a workflow execution.", + "id": "StepEntry", + "properties": { + "createTime": { + "description": "Output only. The creation time of the step entry.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + }, + "entryId": { + "description": "Output only. The numeric ID of this step entry, used for navigation.", + "format": "int64", + "readOnly": true, + "type": "string" + }, + "exception": { + "$ref": "Exception", + "description": "Output only. The exception thrown by the step entry.", + "readOnly": true + }, + "name": { + "description": "Output only. The full resource name of the step entry. Each step entry has a unique entry ID, which is a monotonically increasing counter. Step entry names have the format: `projects/{project}/locations/{location}/workflows/{workflow}/executions/{execution}/stepEntries/{step_entry}`.", + "readOnly": true, + "type": "string" + }, + "navigationInfo": { + "$ref": "NavigationInfo", + "description": "Output only. The NavigationInfo associated to this step.", + "readOnly": true + }, + "routine": { + "description": "Output only. The name of the routine this step entry belongs to. A routine name is the subworkflow name defined in the YAML source code. The top level routine name is `main`.", + "readOnly": true, + "type": "string" + }, + "state": { + "description": "Output only. The state of the step entry.", + "enum": [ + "STATE_UNSPECIFIED", + "STATE_IN_PROGRESS", + "STATE_SUCCEEDED", + "STATE_FAILED" + ], + "enumDescriptions": [ + "Invalid state.", + "The step entry is in progress.", + "The step entry finished successfully.", + "The step entry failed with an error." + ], + "readOnly": true, + "type": "string" + }, + "step": { + "description": "Output only. The name of the step this step entry belongs to.", + "readOnly": true, + "type": "string" + }, + "stepEntryMetadata": { + "$ref": "StepEntryMetadata", + "description": "Output only. The StepEntryMetadata associated to this step.", + "readOnly": true + }, + "stepType": { + "description": "Output only. 
The type of the step this step entry belongs to.", + "enum": [ + "STEP_TYPE_UNSPECIFIED", + "STEP_ASSIGN", + "STEP_STD_LIB_CALL", + "STEP_CONNECTOR_CALL", + "STEP_SUBWORKFLOW_CALL", + "STEP_CALL", + "STEP_SWITCH", + "STEP_CONDITION", + "STEP_FOR", + "STEP_FOR_ITERATION", + "STEP_PARALLEL_FOR", + "STEP_PARALLEL_BRANCH", + "STEP_PARALLEL_BRANCH_ENTRY", + "STEP_TRY_RETRY_EXCEPT", + "STEP_TRY", + "STEP_RETRY", + "STEP_EXCEPT", + "STEP_RETURN", + "STEP_RAISE", + "STEP_GOTO" + ], + "enumDescriptions": [ + "Invalid step type.", + "The step entry assigns new variable(s).", + "The step entry calls a standard library routine.", + "The step entry calls a connector.", + "The step entry calls a subworklfow.", + "The step entry calls a subworkflow/stdlib.", + "The step entry executes a switch-case block.", + "The step entry executes a condition inside a switch.", + "The step entry executes a for loop.", + "The step entry executes a iteration of a for loop.", + "The step entry executes a parallel for loop.", + "The step entry executes a series of parallel branch(es).", + "The step entry executes a branch of a parallel branch.", + "The step entry executes a try/retry/except block.", + "The step entry executes the try part of a try/retry/except block.", + "The step entry executes the retry part of a try/retry/except block.", + "The step entry executes the except part of a try/retry/except block.", + "The step entry returns.", + "The step entry raises an error.", + "The step entry jumps to another step." + ], + "readOnly": true, + "type": "string" + }, + "updateTime": { + "description": "Output only. The most recently updated time of the step entry.", + "format": "google-datetime", + "readOnly": true, + "type": "string" + } + }, + "type": "object" + }, + "StepEntryMetadata": { + "description": "StepEntryMetadata contains metadata information about this step.", + "id": "StepEntryMetadata", + "properties": { + "progressNumber": { + "description": "Progress number represents the current state of the current progress. eg: A step entry represents the 4th iteration in a progress of PROGRESS_TYPE_FOR.", + "format": "int64", + "type": "string" + }, + "progressType": { + "description": "Progress type of this step entry.", + "enum": [ + "PROGRESS_TYPE_UNSPECIFIED", + "PROGRESS_TYPE_FOR", + "PROGRESS_TYPE_SWITCH", + "PROGRESS_TYPE_RETRY", + "PROGRESS_TYPE_PARALLEL_FOR", + "PROGRESS_TYPE_PARALLEL_BRANCH" + ], + "enumDescriptions": [ + "Current step entry does not have any progress data.", + "Current step entry is in progress of a FOR step.", + "Current step entry is in progress of a SWITCH step.", + "Current step entry is in progress of a RETRY step.", + "Current step entry is in progress of a PARALLEL FOR step.", + "Current step entry is in progress of a PARALLEL BRANCH step." 
+ ], + "type": "string" + }, + "threadId": { + "description": "Child thread id that this step entry belongs to.", + "type": "string" + } + }, + "type": "object" + }, "TriggerPubsubExecutionRequest": { "description": "Request for the TriggerPubsubExecution method.", "id": "TriggerPubsubExecutionRequest", diff --git a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json index 6a061b9ff67..d29c0547315 100644 --- a/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json +++ b/googleapiclient/discovery_cache/documents/workflowexecutions.v1beta.json @@ -269,7 +269,7 @@ } } }, - "revision": "20231016", + "revision": "20231017", "rootUrl": "https://workflowexecutions.googleapis.com/", "schemas": { "CancelExecutionRequest": { diff --git a/googleapiclient/discovery_cache/documents/workflows.v1.json b/googleapiclient/discovery_cache/documents/workflows.v1.json index b7a5da54cc4..f886e29f6c6 100644 --- a/googleapiclient/discovery_cache/documents/workflows.v1.json +++ b/googleapiclient/discovery_cache/documents/workflows.v1.json @@ -485,7 +485,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://workflows.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/workflows.v1beta.json b/googleapiclient/discovery_cache/documents/workflows.v1beta.json index 437888fa6d3..10fc77b9746 100644 --- a/googleapiclient/discovery_cache/documents/workflows.v1beta.json +++ b/googleapiclient/discovery_cache/documents/workflows.v1beta.json @@ -444,7 +444,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://workflows.googleapis.com/", "schemas": { "Empty": { diff --git a/googleapiclient/discovery_cache/documents/workloadmanager.v1.json b/googleapiclient/discovery_cache/documents/workloadmanager.v1.json index 21575916503..0bfc45bd17c 100644 --- a/googleapiclient/discovery_cache/documents/workloadmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/workloadmanager.v1.json @@ -707,7 +707,7 @@ } } }, - "revision": "20231008", + "revision": "20231017", "rootUrl": "https://workloadmanager.googleapis.com/", "schemas": { "CancelOperationRequest": { diff --git a/googleapiclient/discovery_cache/documents/workstations.v1.json b/googleapiclient/discovery_cache/documents/workstations.v1.json index 5b711c38c91..4d8cc10b247 100644 --- a/googleapiclient/discovery_cache/documents/workstations.v1.json +++ b/googleapiclient/discovery_cache/documents/workstations.v1.json @@ -1195,7 +1195,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://workstations.googleapis.com/", "schemas": { "AuditConfig": { @@ -1300,7 +1300,7 @@ "type": "object" }, "image": { - "description": "Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible.", + "description": "Optional. A Docker container image that defines a custom environment. 
Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible.", "type": "string" }, "runAsUser": { @@ -1401,7 +1401,7 @@ "type": "integer" }, "serviceAccount": { - "description": "Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible.", + "description": "Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible.", "type": "string" }, "serviceAccountScopes": { diff --git a/googleapiclient/discovery_cache/documents/workstations.v1beta.json b/googleapiclient/discovery_cache/documents/workstations.v1beta.json index 9ae74d97e8a..20c403666aa 100644 --- a/googleapiclient/discovery_cache/documents/workstations.v1beta.json +++ b/googleapiclient/discovery_cache/documents/workstations.v1beta.json @@ -1127,7 +1127,7 @@ } } }, - "revision": "20231011", + "revision": "20231018", "rootUrl": "https://workstations.googleapis.com/", "schemas": { "Accelerator": { @@ -1248,7 +1248,7 @@ "type": "object" }, "image": { - "description": "Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). 
If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration and must have permission to pull the specified image. Otherwise, the image must be publicly accessible.", + "description": "Optional. A Docker container image that defines a custom environment. Cloud Workstations provides a number of [preconfigured images](https://cloud.google.com/workstations/docs/preconfigured-base-images), but you can create your own [custom container images](https://cloud.google.com/workstations/docs/custom-container-images). If using a private image, the `host.gceInstance.serviceAccount` field must be specified in the workstation configuration. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. Otherwise, the image must be publicly accessible.", "type": "string" }, "runAsUser": { @@ -1382,7 +1382,7 @@ "type": "integer" }, "serviceAccount": { - "description": "Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have permissions to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible.", + "description": "Optional. The email address of the service account for Cloud Workstations VMs created with this configuration. When specified, be sure that the service account has `logginglogEntries.create` permission on the project so it can write logs out to Cloud Logging. If using a custom container image, the service account must have [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#roles) permission to pull the specified image. If you as the administrator want to be able to `ssh` into the underlying VM, you need to set this value to a service account for which you have the `iam.serviceAccounts.actAs` permission. Conversely, if you don't want anyone to be able to `ssh` into the underlying VM, use a service account where no one has that permission. 
If not set, VMs run with a service account provided by the Cloud Workstations service, and the image must be publicly accessible.", "type": "string" }, "serviceAccountScopes": { diff --git a/googleapiclient/discovery_cache/documents/youtube.v3.json b/googleapiclient/discovery_cache/documents/youtube.v3.json index 578151ec67c..94383d8b883 100644 --- a/googleapiclient/discovery_cache/documents/youtube.v3.json +++ b/googleapiclient/discovery_cache/documents/youtube.v3.json @@ -3994,7 +3994,7 @@ } } }, - "revision": "20231022", + "revision": "20231029", "rootUrl": "https://youtube.googleapis.com/", "schemas": { "AbuseReport": { diff --git a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json index 023aed572ff..e98d2429dbe 100644 --- a/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json +++ b/googleapiclient/discovery_cache/documents/youtubeAnalytics.v2.json @@ -421,7 +421,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://youtubeanalytics.googleapis.com/", "schemas": { "EmptyResponse": { diff --git a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json index 2380de5ece4..80627c02082 100644 --- a/googleapiclient/discovery_cache/documents/youtubereporting.v1.json +++ b/googleapiclient/discovery_cache/documents/youtubereporting.v1.json @@ -411,7 +411,7 @@ } } }, - "revision": "20231023", + "revision": "20231030", "rootUrl": "https://youtubereporting.googleapis.com/", "schemas": { "Empty": {