From e40139ded573066644065b6676e64db8eddc684c Mon Sep 17 00:00:00 2001
From: yoshi-code-bot <70984784+yoshi-code-bot@users.noreply.github.com>
Date: Tue, 31 Oct 2023 07:24:14 -0700
Subject: [PATCH] chore: Update discovery artifacts (#2273)

## Deleted keys were detected in the following stable discovery artifacts:
compute v1 https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
contentwarehouse v1 https://togithub.com/googleapis/google-api-python-client/commit/84de9162568c3c9ac9a1589534a107512d88e14b
identitytoolkit v2 https://togithub.com/googleapis/google-api-python-client/commit/3380caef861f8e81556738d2e3bed73a04c29d16
mybusinesslodging v1 https://togithub.com/googleapis/google-api-python-client/commit/0f6e412d1a75d101acb5eb03e5d2f36384f11376
places v1 https://togithub.com/googleapis/google-api-python-client/commit/fb2cf0d1f3198a33f39e515472dbb2f4ae4720d6
sqladmin v1 https://togithub.com/googleapis/google-api-python-client/commit/c487e30ff7ee30d86201e0d4bc0076b553c69863
testing v1 https://togithub.com/googleapis/google-api-python-client/commit/35d4629dbe736ea689dfd8d04ae9425b9862b4ec

## Deleted keys were detected in the following pre-stable discovery artifacts:
compute alpha https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
compute beta https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
sqladmin v1beta4 https://togithub.com/googleapis/google-api-python-client/commit/c487e30ff7ee30d86201e0d4bc0076b553c69863

## Discovery Artifact Change Summary:
feat(aiplatform): update the api https://togithub.com/googleapis/google-api-python-client/commit/7bdb02a3ab59e7e1f15df898fdd0b0e0596a1f92
feat(analyticsadmin): update the api https://togithub.com/googleapis/google-api-python-client/commit/5779a441640f840f866736514a66718a5ed07e80
feat(androiddeviceprovisioning): update the api https://togithub.com/googleapis/google-api-python-client/commit/160c287e0dd64e2576f787b66fd6b9d7cf7e155e
feat(appengine): update the api https://togithub.com/googleapis/google-api-python-client/commit/d6c3c723660b03319bae6ab0573b83d9f340cc07
feat(assuredworkloads): update the api https://togithub.com/googleapis/google-api-python-client/commit/2d0f2c5b561c1844be7f382155ec232a214c1f5c
feat(batch): update the api https://togithub.com/googleapis/google-api-python-client/commit/38e594da9323540fa904a25d385c76b4fc6edb2c
feat(cloudbilling): update the api https://togithub.com/googleapis/google-api-python-client/commit/d536b311d73aeac6ae0af89f3bcc22a204ce53f9
feat(compute): update the api https://togithub.com/googleapis/google-api-python-client/commit/204fa5aa6b49170f4c7641fe147e51f361e1b9c1
feat(connectors): update the api https://togithub.com/googleapis/google-api-python-client/commit/e6171f5380b7a0eb1fbf3a7b910f2c9fef0112b7
feat(contentwarehouse): update the api https://togithub.com/googleapis/google-api-python-client/commit/84de9162568c3c9ac9a1589534a107512d88e14b
feat(dataflow): update the api https://togithub.com/googleapis/google-api-python-client/commit/af59b3870e78f4c6841623b8aa68a850902b8f86
feat(dataproc): update the api https://togithub.com/googleapis/google-api-python-client/commit/01acfe0787e47037aca16f96dbefc164d449f8ca
feat(gmail): update the api https://togithub.com/googleapis/google-api-python-client/commit/4dc6c3595923f51dbcad4e1e3aee1b6cd183b814
feat(identitytoolkit): update the api https://togithub.com/googleapis/google-api-python-client/commit/3380caef861f8e81556738d2e3bed73a04c29d16
feat(metastore): update the api https://togithub.com/googleapis/google-api-python-client/commit/270ef650a7a7e678cd37faaf90ff3a5e4c23252a
feat(mybusinesslodging): update the api https://togithub.com/googleapis/google-api-python-client/commit/0f6e412d1a75d101acb5eb03e5d2f36384f11376
feat(notebooks): update the api https://togithub.com/googleapis/google-api-python-client/commit/419948bd02ef47dec5456f9d1ce341eb19d0f738
feat(places): update the api https://togithub.com/googleapis/google-api-python-client/commit/fb2cf0d1f3198a33f39e515472dbb2f4ae4720d6
feat(recaptchaenterprise): update the api https://togithub.com/googleapis/google-api-python-client/commit/2985fd0f9c76efa304e9e16db3c362ebb654f11a
feat(run): update the api https://togithub.com/googleapis/google-api-python-client/commit/954b59d3bc80fa007ac65a69f9337456fc8da06f
feat(servicenetworking): update the api https://togithub.com/googleapis/google-api-python-client/commit/8eda3cbe99c0fc35a66edd52e40cda7b30f35bc9
feat(spanner): update the api https://togithub.com/googleapis/google-api-python-client/commit/d046f6e7f0b7be466423e06f9339c6edf911b97e
feat(sqladmin): update the api https://togithub.com/googleapis/google-api-python-client/commit/c487e30ff7ee30d86201e0d4bc0076b553c69863
feat(testing): update the api https://togithub.com/googleapis/google-api-python-client/commit/35d4629dbe736ea689dfd8d04ae9425b9862b4ec
feat(texttospeech): update the api https://togithub.com/googleapis/google-api-python-client/commit/44ffbb0a7bbbfc6452e57a05c213523774578364
feat(vmmigration): update the api https://togithub.com/googleapis/google-api-python-client/commit/f9cd5f95977effb262b17ce5d5b4c4895872954c
feat(workflowexecutions): update the api https://togithub.com/googleapis/google-api-python-client/commit/c24e2608c3996d57b5e062fcb33c58f83f8838d7
---
 ...sapproval_v1.folders.approvalRequests.html | 15 +-
 ...val_v1.organizations.approvalRequests.html | 15 +-
 ...approval_v1.projects.approvalRequests.html | 15 +-
 ...form_v1.projects.locations.customJobs.html | 8 +-
 ...ts.locations.hyperparameterTuningJobs.html | 68 +-
 ...latform_v1.projects.locations.nasJobs.html | 16 +-
 ...latform_v1.projects.locations.studies.html | 85 +-
 ...v1beta1.projects.locations.customJobs.html | 8 +-
 ...ts.locations.hyperparameterTuningJobs.html | 68 +-
 ...rm_v1beta1.projects.locations.nasJobs.html | 16 +-
 ...rm_v1beta1.projects.locations.studies.html | 85 +-
 docs/dyn/analyticsadmin_v1alpha.accounts.html | 8 +
 ...n_v1alpha.properties.conversionEvents.html | 24 +
 docs/dyn/analyticsadmin_v1beta.accounts.html | 8 +
 ...in_v1beta.properties.conversionEvents.html | 24 +
 ...eviceprovisioning_v1.partners.devices.html | 6 +-
 ...roidmanagement_v1.enterprises.devices.html | 2 +-
 .../appengine_v1.apps.services.versions.html | 12 +
 ..._v1.organizations.locations.workloads.html | 24 +-
 ...ta1.organizations.locations.workloads.html | 20 +-
 .../batch_v1.projects.locations.state.html | 1 +
 docs/dyn/biglake_v1.html | 111 +
 docs/dyn/biglake_v1.projects.html | 91 +
 ...projects.locations.catalogs.databases.html | 318 ++
 ...s.locations.catalogs.databases.tables.html | 430 +++
 ...iglake_v1.projects.locations.catalogs.html | 231 ++
 docs/dyn/biglake_v1.projects.locations.html | 91 +
 docs/dyn/cloudbilling_v1.billingAccounts.html | 47 +-
 ...illing_v1.billingAccounts.subAccounts.html | 171 ++
 docs/dyn/cloudbilling_v1.html | 5 +
 ...ling_v1.organizations.billingAccounts.html | 197 ++
 docs/dyn/cloudbilling_v1.organizations.html | 91 +
 .../dyn/cloudtasks_v2.projects.locations.html | 6 +-
 ...oudtasks_v2.projects.locations.queues.html | 4 +-
 ...cloudtasks_v2beta2.projects.locations.html | 6 +-
 ...beta2.projects.locations.queues.tasks.html | 4 +-
 ...cloudtasks_v2beta3.projects.locations.html | 6 +-
 ...sks_v2beta3.projects.locations.queues.html | 4 +-
 ...beta3.projects.locations.queues.tasks.html | 4 +-
 docs/dyn/compute_alpha.backendServices.html | 14 +-
 ...ute_alpha.globalNetworkEndpointGroups.html | 3 -
 docs/dyn/compute_alpha.instances.html | 145 +
 .../compute_alpha.networkEndpointGroups.html | 4 -
 .../compute_alpha.regionBackendServices.html | 12 +-
 docs/dyn/compute_alpha.regionCommitments.html | 15 +
 ...ute_alpha.regionNetworkEndpointGroups.html | 3 -
 ...ations.connections.eventSubscriptions.html | 16 +
 ...cations.providers.connectors.versions.html | 2 +
 docs/dyn/content_v2_1.reports.html | 6 +-
 docs/dyn/dataflow_v1b3.projects.jobs.html | 7 +
 ...v1b3.projects.locations.flexTemplates.html | 1 +
 ...dataflow_v1b3.projects.locations.jobs.html | 6 +
 ...low_v1b3.projects.locations.templates.html | 2 +
 .../dyn/dataflow_v1b3.projects.templates.html | 2 +
 ...aplex_v1.projects.locations.dataScans.html | 10 +-
 ..._v1.projects.locations.dataScans.jobs.html | 4 +-
 ...ataproc_v1.projects.locations.batches.html | 12 +
 ...taproc_v1.projects.locations.sessions.html | 12 +
 ....projects.locations.workflowTemplates.html | 24 +
 ...v1.projects.regions.workflowTemplates.html | 24 +
 ..._v2.organizations.deidentifyTemplates.html | 8 +-
 ...dlp_v2.organizations.inspectTemplates.html | 8 +-
 ...zations.locations.deidentifyTemplates.html | 8 +-
 ...anizations.locations.discoveryConfigs.html | 150 +-
 ...lp_v2.organizations.locations.dlpJobs.html | 2 +-
 ...anizations.locations.inspectTemplates.html | 8 +-
 ...2.organizations.locations.jobTriggers.html | 8 +-
 ...ganizations.locations.storedInfoTypes.html | 8 +-
 .../dlp_v2.organizations.storedInfoTypes.html | 8 +-
 .../dlp_v2.projects.deidentifyTemplates.html | 8 +-
 docs/dyn/dlp_v2.projects.dlpJobs.html | 2 +-
 .../dyn/dlp_v2.projects.inspectTemplates.html | 8 +-
 docs/dyn/dlp_v2.projects.jobTriggers.html | 8 +-
 ...rojects.locations.deidentifyTemplates.html | 8 +-
 ...2.projects.locations.discoveryConfigs.html | 150 +-
 .../dlp_v2.projects.locations.dlpJobs.html | 2 +-
 ...2.projects.locations.inspectTemplates.html | 8 +-
 ...dlp_v2.projects.locations.jobTriggers.html | 8 +-
 ...v2.projects.locations.storedInfoTypes.html | 8 +-
 docs/dyn/dlp_v2.projects.storedInfoTypes.html | 8 +-
 .../gmail_v1.users.settings.cse.keypairs.html | 18 +
 docs/dyn/identitytoolkit_v1.accounts.html | 4 +-
 .../identitytoolkit_v1.projects.accounts.html | 2 +-
 ...ytoolkit_v1.projects.tenants.accounts.html | 2 +-
 docs/dyn/identitytoolkit_v2.projects.html | 64 -
 .../identitytoolkit_v2.projects.tenants.html | 64 -
 docs/dyn/index.md | 4 +
 ...1.projects.locations.services.backups.html | 15 +
 ...astore_v1.projects.locations.services.html | 20 +
 ...a.projects.locations.services.backups.html | 18 +-
 ...e_v1alpha.projects.locations.services.html | 24 +-
 ...a.projects.locations.services.backups.html | 18 +-
 ...re_v1beta.projects.locations.services.html | 24 +-
 .../monitoring_v3.projects.alertPolicies.html | 12 +-
 .../monitoring_v3.projects.timeSeries.html | 4 +-
 docs/dyn/mybusinesslodging_v1.locations.html | 30 +-
 ...ybusinesslodging_v1.locations.lodging.html | 10 +-
 ....projects.locations.lbRouteExtensions.html | 16 +-
 ...rojects.locations.lbTrafficExtensions.html | 8 +-
 ...books_v1.projects.locations.instances.html | 6 +-
 ...books_v2.projects.locations.instances.html | 38 +
 docs/dyn/places_v1.places.html | 891 +++++-
 docs/dyn/places_v1.places.photos.html | 112 +
 docs/dyn/pubsub_v1.projects.schemas.html | 2 +-
 docs/dyn/pubsub_v1.projects.snapshots.html | 6 +-
 .../dyn/pubsub_v1.projects.subscriptions.html | 4 +-
 docs/dyn/pubsub_v1.projects.topics.html | 16 +-
 ...chaenterprise_v1.projects.assessments.html | 33 +-
 .../dyn/run_v1.namespaces.configurations.html | 8 +-
 .../dyn/run_v1.namespaces.domainmappings.html | 8 +-
 docs/dyn/run_v1.namespaces.executions.html | 6 +-
 docs/dyn/run_v1.namespaces.jobs.html | 26 +-
 docs/dyn/run_v1.namespaces.revisions.html | 4 +-
 docs/dyn/run_v1.namespaces.routes.html | 4 +-
 docs/dyn/run_v1.namespaces.services.html | 24 +-
 docs/dyn/run_v1.namespaces.tasks.html | 4 +-
 ..._v1.projects.locations.configurations.html | 8 +-
 ..._v1.projects.locations.domainmappings.html | 8 +-
 .../run_v1.projects.locations.revisions.html | 4 +-
 .../dyn/run_v1.projects.locations.routes.html | 4 +-
 .../run_v1.projects.locations.services.html | 24 +-
 .../run_v2.projects.locations.services.html | 12 +
 docs/dyn/servicenetworking_v1.services.html | 1 +
 ...v1.services.projects.global_.networks.html | 22 +
 docs/dyn/spanner_v1.projects.instances.html | 48 +
 docs/dyn/sqladmin_v1.backupRuns.html | 8 +-
 docs/dyn/sqladmin_v1.databases.html | 16 +-
 docs/dyn/sqladmin_v1.instances.html | 109 +-
 docs/dyn/sqladmin_v1.operations.html | 8 +-
 docs/dyn/sqladmin_v1.projects.instances.html | 16 +-
 docs/dyn/sqladmin_v1.sslCerts.html | 8 +-
 docs/dyn/sqladmin_v1.users.html | 12 +-
 docs/dyn/sqladmin_v1beta4.backupRuns.html | 8 +-
 docs/dyn/sqladmin_v1beta4.databases.html | 16 +-
 docs/dyn/sqladmin_v1beta4.instances.html | 109 +-
 docs/dyn/sqladmin_v1beta4.operations.html | 8 +-
 .../sqladmin_v1beta4.projects.instances.html | 16 +-
 docs/dyn/sqladmin_v1beta4.sslCerts.html | 8 +-
 docs/dyn/sqladmin_v1beta4.users.html | 12 +-
 .../testing_v1.projects.deviceSessions.html | 72 +-
 .../texttospeech_v1.projects.locations.html | 2 +-
 docs/dyn/texttospeech_v1.text.html | 2 +-
 ...xttospeech_v1beta1.projects.locations.html | 2 +-
 docs/dyn/texttospeech_v1beta1.text.html | 2 +-
 ...gration_v1.projects.locations.sources.html | 12 +
 ...ations.sources.migratingVms.cloneJobs.html | 9 +
 ...ions.sources.migratingVms.cutoverJobs.html | 9 +
 ...ojects.locations.sources.migratingVms.html | 72 +
 ...n_v1alpha1.projects.locations.sources.html | 12 +
 ...ations.sources.migratingVms.cloneJobs.html | 9 +
 ...ions.sources.migratingVms.cutoverJobs.html | 9 +
 ...ojects.locations.sources.migratingVms.html | 72 +
 ...ojects.locations.workflows.executions.html | 5 +
 ...ions.workflows.executions.stepEntries.html | 204 ++
 ...orkstationClusters.workstationConfigs.html | 20 +-
 ...orkstationClusters.workstationConfigs.html | 20 +-
 .../acceleratedmobilepageurl.v1.json | 2 +-
 .../accesscontextmanager.v1beta.json | 2 +-
 .../discovery_cache/documents/acmedns.v1.json | 2 +-
 .../documents/adexchangebuyer2.v2beta1.json | 2 +-
 .../documents/admin.datatransfer_v1.json | 2 +-
 .../documents/admin.directory_v1.json | 2 +-
 .../documents/admin.reports_v1.json | 2 +-
 .../discovery_cache/documents/admob.v1.json | 2 +-
 .../documents/admob.v1beta.json | 2 +-
 .../discovery_cache/documents/adsense.v2.json | 2 +-
 .../documents/advisorynotifications.v1.json | 2 +-
 .../documents/aiplatform.v1.json | 66 +-
 .../documents/aiplatform.v1beta1.json | 66 +-
 .../documents/alertcenter.v1beta1.json | 2 +-
 .../documents/analyticsadmin.v1alpha.json | 22 +-
 .../documents/analyticsadmin.v1beta.json | 22 +-
 .../documents/analyticsdata.v1beta.json | 2 +-
 .../documents/analyticshub.v1.json | 2 +-
 .../documents/analyticshub.v1beta1.json | 2 +-
 .../androiddeviceprovisioning.v1.json | 16 +-
 .../documents/androidenterprise.v1.json | 2 +-
 .../documents/androidmanagement.v1.json | 18 +-
 .../documents/androidpublisher.v3.json | 15 +-
 .../documents/apigateway.v1.json | 2 +-
 .../documents/apigateway.v1beta.json | 2 +-
 .../discovery_cache/documents/apikeys.v2.json | 2 +-
 .../documents/appengine.v1.json | 10 +-
 .../documents/appengine.v1alpha.json | 2 +-
 .../documents/appengine.v1beta.json | 2 +-
 .../documents/area120tables.v1alpha1.json | 2 +-
 .../documents/artifactregistry.v1.json | 2 +-
 .../documents/artifactregistry.v1beta1.json | 2 +-
 .../documents/artifactregistry.v1beta2.json | 2 +-
 .../documents/assuredworkloads.v1.json | 7 +-
 .../documents/assuredworkloads.v1beta1.json | 7 +-
 .../authorizedbuyersmarketplace.v1.json | 2 +-
 .../documents/backupdr.v1.json | 2 +-
 .../documents/baremetalsolution.v2.json | 2 +-
 .../discovery_cache/documents/batch.v1.json | 6 +-
 .../discovery_cache/documents/biglake.v1.json | 910 ++++++
 .../documents/bigquery.v2.json | 2 +-
 .../documents/bigqueryconnection.v1beta1.json | 2 +-
 .../documents/bigquerydatatransfer.v1.json | 2 +-
 .../documents/bigqueryreservation.v1.json | 2 +-
 .../documents/bigtableadmin.v2.json | 2 +-
 .../documents/billingbudgets.v1.json | 2 +-
 .../documents/billingbudgets.v1beta1.json | 2 +-
 .../documents/binaryauthorization.v1.json | 2 +-
 .../binaryauthorization.v1beta1.json | 2 +-
 .../documents/blockchainnodeengine.v1.json | 2 +-
 .../discovery_cache/documents/blogger.v2.json | 2 +-
 .../discovery_cache/documents/blogger.v3.json | 2 +-
 .../discovery_cache/documents/books.v1.json | 2 +-
 .../businessprofileperformance.v1.json | 2 +-
 .../documents/calendar.v3.json | 2 +-
 .../discovery_cache/documents/chat.v1.json | 2 +-
 .../documents/checks.v1alpha.json | 2 +-
 .../documents/chromemanagement.v1.json | 2 +-
 .../documents/chromepolicy.v1.json | 2 +-
 .../documents/chromeuxreport.v1.json | 2 +-
 .../documents/classroom.v1.json | 2 +-
 .../documents/cloudasset.v1.json | 2 +-
 .../documents/cloudasset.v1beta1.json | 2 +-
 .../documents/cloudasset.v1p1beta1.json | 2 +-
 .../documents/cloudasset.v1p5beta1.json | 2 +-
 .../documents/cloudasset.v1p7beta1.json | 2 +-
 .../documents/cloudbilling.v1.json | 245 +-
 .../documents/cloudbilling.v1beta.json | 2 +-
 .../documents/cloudbuild.v1.json | 2 +-
 .../documents/cloudbuild.v2.json | 2 +-
 .../documents/cloudchannel.v1.json | 2 +-
 .../documents/clouddeploy.v1.json | 2 +-
 .../clouderrorreporting.v1beta1.json | 2 +-
 .../documents/cloudfunctions.v1.json | 2 +-
 .../documents/cloudfunctions.v2.json | 2 +-
 .../documents/cloudfunctions.v2alpha.json | 2 +-
 .../documents/cloudfunctions.v2beta.json | 2 +-
 .../documents/cloudidentity.v1.json | 2 +-
 .../documents/cloudidentity.v1beta1.json | 2 +-
 .../documents/cloudkms.v1.json | 2 +-
 .../documents/cloudprofiler.v2.json | 2 +-
 .../documents/cloudresourcemanager.v1.json | 2 +-
 .../cloudresourcemanager.v1beta1.json | 2 +-
 .../documents/cloudresourcemanager.v2.json | 2 +-
 .../cloudresourcemanager.v2beta1.json | 2 +-
 .../documents/cloudresourcemanager.v3.json | 2 +-
 .../documents/cloudscheduler.v1.json | 2 +-
 .../documents/cloudscheduler.v1beta1.json | 2 +-
 .../documents/cloudsearch.v1.json | 2 +-
 .../documents/cloudshell.v1.json | 2 +-
 .../documents/cloudsupport.v2.json | 2 +-
 .../documents/cloudsupport.v2beta.json | 2 +-
 .../documents/cloudtasks.v2.json | 6 +-
 .../documents/cloudtasks.v2beta2.json | 6 +-
 .../documents/cloudtasks.v2beta3.json | 8 +-
 .../documents/compute.alpha.json | 120 +-
 .../documents/compute.beta.json | 7 +-
 .../discovery_cache/documents/compute.v1.json | 7 +-
 .../documents/connectors.v1.json | 59 +-
 .../documents/connectors.v2.json | 2 +-
 .../documents/containeranalysis.v1.json | 2 +-
 .../documents/containeranalysis.v1alpha1.json | 2 +-
 .../documents/containeranalysis.v1beta1.json | 2 +-
 .../documents/content.v2.1.json | 8 +-
 .../documents/contentwarehouse.v1.json | 2696 ++++++++-------------
 .../documents/customsearch.v1.json | 2 +-
 .../documents/datacatalog.v1.json | 2 +-
 .../documents/datacatalog.v1beta1.json | 2 +-
 .../documents/dataflow.v1b3.json | 7 +-
 .../documents/datalineage.v1.json | 2 +-
 .../documents/datamigration.v1.json | 2 +-
 .../documents/datamigration.v1beta1.json | 2 +-
 .../documents/datapipelines.v1.json | 2 +-
 .../documents/dataplex.v1.json | 4 +-
 .../documents/dataproc.v1.json | 49 +-
 .../documents/datastore.v1.json | 2 +-
 .../documents/datastore.v1beta1.json | 2 +-
 .../documents/datastore.v1beta3.json | 2 +-
 .../documents/dialogflow.v2.json | 2 +-
 .../documents/dialogflow.v2beta1.json | 2 +-
 .../documents/dialogflow.v3.json | 2 +-
 .../documents/dialogflow.v3beta1.json | 2 +-
 .../documents/digitalassetlinks.v1.json | 2 +-
 .../documents/discoveryengine.v1alpha.json | 2 +-
 .../documents/discoveryengine.v1beta.json | 2 +-
 .../documents/displayvideo.v1.json | 23 +-
 .../documents/displayvideo.v2.json | 23 +-
 .../documents/displayvideo.v3.json | 27 +-
 .../discovery_cache/documents/dlp.v2.json | 178 +-
 .../discovery_cache/documents/dns.v1.json | 2 +-
 .../documents/dns.v1beta2.json | 2 +-
 .../discovery_cache/documents/docs.v1.json | 2 +-
 .../documents/documentai.v1.json | 2 +-
 .../documents/documentai.v1beta2.json | 2 +-
 .../documents/documentai.v1beta3.json | 2 +-
 .../discovery_cache/documents/domains.v1.json | 8 +-
 .../documents/domains.v1alpha2.json | 8 +-
 .../documents/domains.v1beta1.json | 8 +-
 .../documents/domainsrdap.v1.json | 2 +-
 .../documents/doubleclickbidmanager.v2.json | 2 +-
 .../documents/doubleclicksearch.v2.json | 2 +-
 .../discovery_cache/documents/drive.v2.json | 2 +-
 .../discovery_cache/documents/drive.v3.json | 2 +-
 .../documents/driveactivity.v2.json | 2 +-
 .../documents/drivelabels.v2.json | 2 +-
 .../documents/drivelabels.v2beta.json | 2 +-
 .../documents/essentialcontacts.v1.json | 2 +-
 .../documents/eventarc.v1.json | 2 +-
 .../documents/eventarc.v1beta1.json | 2 +-
 .../documents/factchecktools.v1alpha1.json | 2 +-
 .../discovery_cache/documents/fcm.v1.json | 2 +-
 .../documents/fcmdata.v1beta1.json | 2 +-
 .../discovery_cache/documents/file.v1.json | 14 +-
 .../documents/file.v1beta1.json | 14 +-
 .../documents/firebaseappdistribution.v1.json | 2 +-
 .../documents/firebasedatabase.v1beta.json | 2 +-
 .../documents/firebasehosting.v1.json | 2 +-
 .../documents/firebasehosting.v1beta1.json | 2 +-
 .../documents/firebaseml.v1.json | 2 +-
 .../documents/firebaseml.v1beta2.json | 2 +-
 .../documents/firebasestorage.v1beta.json | 2 +-
 .../documents/firestore.v1.json | 2 +-
 .../documents/firestore.v1beta1.json | 2 +-
 .../documents/firestore.v1beta2.json | 2 +-
 .../discovery_cache/documents/fitness.v1.json | 2 +-
 .../discovery_cache/documents/forms.v1.json | 2 +-
 .../discovery_cache/documents/games.v1.json | 2 +-
 .../gamesConfiguration.v1configuration.json | 2 +-
 .../gamesManagement.v1management.json | 2 +-
 .../documents/gkebackup.v1.json | 2 +-
 .../discovery_cache/documents/gkehub.v1.json | 2 +-
 .../documents/gkehub.v1alpha.json | 2 +-
 .../documents/gkehub.v1alpha2.json | 2 +-
 .../documents/gkehub.v1beta.json | 2 +-
 .../documents/gkehub.v1beta1.json | 2 +-
 .../documents/gkehub.v2alpha.json | 2 +-
 .../documents/gkeonprem.v1.json | 2 +-
 .../discovery_cache/documents/gmail.v1.json | 17 +-
 .../documents/gmailpostmastertools.v1.json | 2 +-
 .../gmailpostmastertools.v1beta1.json | 2 +-
 .../documents/groupsmigration.v1.json | 2 +-
 .../documents/healthcare.v1.json | 2 +-
 .../documents/healthcare.v1beta1.json | 2 +-
 .../documents/homegraph.v1.json | 2 +-
 .../documents/iamcredentials.v1.json | 2 +-
 .../documents/iap.v1beta1.json | 2 +-
 .../documents/identitytoolkit.v1.json | 6 +-
 .../documents/identitytoolkit.v2.json | 146 +-
 .../documents/indexing.v3.json | 2 +-
 .../discovery_cache/documents/jobs.v3.json | 2 +-
 .../discovery_cache/documents/jobs.v4.json | 2 +-
 .../discovery_cache/documents/keep.v1.json | 2 +-
 .../documents/kmsinventory.v1.json | 2 +-
 .../documents/language.v1.json | 2 +-
 .../documents/language.v1beta2.json | 2 +-
 .../documents/language.v2.json | 2 +-
 .../documents/libraryagent.v1.json | 2 +-
 .../documents/licensing.v1.json | 2 +-
 .../documents/lifesciences.v2beta.json | 2 +-
 .../documents/localservices.v1.json | 2 +-
 .../documents/memcache.v1.json | 2 +-
 .../documents/memcache.v1beta2.json | 2 +-
 .../documents/metastore.v1.json | 28 +-
 .../documents/metastore.v1alpha.json | 8 +-
 .../documents/metastore.v1beta.json | 8 +-
 .../documents/migrationcenter.v1.json | 2 +-
 .../documents/migrationcenter.v1alpha1.json | 2 +-
 .../documents/monitoring.v1.json | 2 +-
 .../documents/monitoring.v3.json | 6 +-
 .../mybusinessaccountmanagement.v1.json | 2 +-
 .../mybusinessbusinessinformation.v1.json | 2 +-
 .../documents/mybusinesslodging.v1.json | 19 +-
 .../documents/mybusinessnotifications.v1.json | 2 +-
 .../documents/mybusinessplaceactions.v1.json | 2 +-
 .../documents/mybusinessqanda.v1.json | 2 +-
 .../documents/mybusinessverifications.v1.json | 2 +-
 .../documents/networkmanagement.v1.json | 2 +-
 .../documents/networkmanagement.v1beta1.json | 2 +-
 .../documents/networksecurity.v1.json | 2 +-
 .../documents/networksecurity.v1beta1.json | 2 +-
 .../documents/networkservices.v1.json | 2 +-
 .../documents/networkservices.v1beta1.json | 8 +-
 .../documents/notebooks.v1.json | 4 +-
 .../documents/notebooks.v2.json | 104 +-
 .../documents/ondemandscanning.v1.json | 2 +-
 .../documents/ondemandscanning.v1beta1.json | 2 +-
 .../documents/orgpolicy.v2.json | 2 +-
 .../discovery_cache/documents/oslogin.v1.json | 2 +-
 .../documents/oslogin.v1alpha.json | 2 +-
 .../documents/oslogin.v1beta.json | 2 +-
 .../documents/pagespeedonline.v5.json | 2 +-
 .../paymentsresellersubscription.v1.json | 2 +-
 .../discovery_cache/documents/people.v1.json | 2 +-
 .../discovery_cache/documents/places.v1.json | 636 +++-
 .../documents/playcustomapp.v1.json | 2 +-
 .../playdeveloperreporting.v1alpha1.json | 2 +-
 .../playdeveloperreporting.v1beta1.json | 2 +-
 .../documents/playgrouping.v1alpha1.json | 2 +-
 .../documents/playintegrity.v1.json | 2 +-
 .../documents/policyanalyzer.v1.json | 2 +-
 .../documents/policyanalyzer.v1beta1.json | 2 +-
 .../documents/policysimulator.v1.json | 2 +-
 .../documents/policysimulator.v1alpha.json | 2 +-
 .../documents/policysimulator.v1beta.json | 2 +-
 .../documents/policytroubleshooter.v1.json | 2 +-
 .../policytroubleshooter.v1beta.json | 2 +-
 .../documents/privateca.v1.json | 2 +-
 .../documents/privateca.v1beta1.json | 2 +-
 .../documents/prod_tt_sasportal.v1alpha1.json | 2 +-
 .../documents/publicca.v1.json | 2 +-
 .../documents/publicca.v1alpha1.json | 2 +-
 .../documents/publicca.v1beta1.json | 2 +-
 .../discovery_cache/documents/pubsub.v1.json | 14 +-
 .../documents/pubsub.v1beta1a.json | 2 +-
 .../documents/pubsub.v1beta2.json | 2 +-
 .../documents/pubsublite.v1.json | 2 +-
 .../rapidmigrationassessment.v1.json | 2 +-
 .../readerrevenuesubscriptionlinking.v1.json | 2 +-
 .../documents/realtimebidding.v1.json | 2 +-
 .../documents/recaptchaenterprise.v1.json | 59 +-
 .../recommendationengine.v1beta1.json | 2 +-
 .../documents/recommender.v1.json | 2 +-
 .../documents/recommender.v1beta1.json | 2 +-
 .../discovery_cache/documents/redis.v1.json | 2 +-
 .../documents/redis.v1beta1.json | 2 +-
 .../documents/reseller.v1.json | 2 +-
 .../documents/resourcesettings.v1.json | 2 +-
 .../discovery_cache/documents/retail.v2.json | 2 +-
 .../documents/retail.v2alpha.json | 2 +-
 .../documents/retail.v2beta.json | 2 +-
 .../discovery_cache/documents/run.v1.json | 4 +-
 .../discovery_cache/documents/run.v2.json | 18 +-
 .../documents/runtimeconfig.v1.json | 2 +-
 .../documents/safebrowsing.v4.json | 2 +-
 .../documents/safebrowsing.v5.json | 2 +-
 .../documents/searchconsole.v1.json | 2 +-
 .../documents/secretmanager.v1.json | 2 +-
 .../documents/secretmanager.v1beta1.json | 2 +-
 .../documents/securitycenter.v1.json | 2 +-
 .../documents/securitycenter.v1beta1.json | 2 +-
 .../documents/securitycenter.v1beta2.json | 2 +-
 .../serviceconsumermanagement.v1.json | 2 +-
 .../serviceconsumermanagement.v1beta1.json | 2 +-
 .../documents/servicecontrol.v1.json | 2 +-
 .../documents/servicecontrol.v2.json | 2 +-
 .../documents/servicedirectory.v1.json | 2 +-
 .../documents/servicedirectory.v1beta1.json | 2 +-
 .../documents/servicemanagement.v1.json | 2 +-
 .../documents/servicenetworking.v1.json | 44 +-
 .../documents/servicenetworking.v1beta.json | 14 +-
 .../documents/serviceusage.v1.json | 2 +-
 .../documents/serviceusage.v1beta1.json | 2 +-
 .../discovery_cache/documents/slides.v1.json | 2 +-
 .../discovery_cache/documents/spanner.v1.json | 63 +
 .../discovery_cache/documents/speech.v1.json | 2 +-
 .../documents/speech.v1p1beta1.json | 2 +-
 .../documents/sqladmin.v1.json | 32 +-
 .../documents/sqladmin.v1beta4.json | 32 +-
 .../discovery_cache/documents/storage.v1.json | 4 +-
 .../documents/storagetransfer.v1.json | 2 +-
 .../documents/streetviewpublish.v1.json | 2 +-
 .../discovery_cache/documents/sts.v1.json | 2 +-
 .../discovery_cache/documents/sts.v1beta.json | 2 +-
 .../documents/tagmanager.v1.json | 2 +-
 .../documents/tagmanager.v2.json | 2 +-
 .../discovery_cache/documents/tasks.v1.json | 2 +-
 .../discovery_cache/documents/testing.v1.json | 9 +-
 .../documents/texttospeech.v1.json | 5 +-
 .../documents/texttospeech.v1beta1.json | 5 +-
 .../documents/toolresults.v1beta3.json | 2 +-
 .../discovery_cache/documents/tpu.v1.json | 2 +-
 .../documents/tpu.v1alpha1.json | 2 +-
 .../discovery_cache/documents/tpu.v2.json | 2 +-
 .../documents/tpu.v2alpha1.json | 2 +-
 .../documents/trafficdirector.v2.json | 2 +-
 .../documents/transcoder.v1.json | 2 +-
 .../documents/travelimpactmodel.v1.json | 2 +-
 .../documents/verifiedaccess.v1.json | 2 +-
 .../documents/verifiedaccess.v2.json | 2 +-
 .../documents/versionhistory.v1.json | 2 +-
 .../discovery_cache/documents/vision.v1.json | 2 +-
 .../documents/vision.v1p1beta1.json | 2 +-
 .../documents/vision.v1p2beta1.json | 2 +-
 .../documents/vmmigration.v1.json | 43 +-
 .../documents/vmmigration.v1alpha1.json | 43 +-
 .../documents/vpcaccess.v1beta1.json | 2 +-
 .../discovery_cache/documents/webrisk.v1.json | 2 +-
 .../documents/websecurityscanner.v1.json | 8 +-
 .../documents/websecurityscanner.v1alpha.json | 22 +-
 .../documents/websecurityscanner.v1beta.json | 8 +-
 .../documents/workflowexecutions.v1.json | 304 +-
 .../documents/workflowexecutions.v1beta.json | 2 +-
 .../documents/workflows.v1.json | 2 +-
 .../documents/workflows.v1beta.json | 2 +-
 .../documents/workloadmanager.v1.json | 2 +-
 .../documents/workstations.v1.json | 6 +-
 .../documents/workstations.v1beta.json | 6 +-
 .../discovery_cache/documents/youtube.v3.json | 2 +-
 .../documents/youtubeAnalytics.v2.json | 2 +-
 .../documents/youtubereporting.v1.json | 2 +-
 496 files changed, 8741 insertions(+), 3890 deletions(-)
 create mode 100644 docs/dyn/biglake_v1.html
 create mode 100644 docs/dyn/biglake_v1.projects.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.catalogs.databases.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.catalogs.databases.tables.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.catalogs.html
 create mode 100644 docs/dyn/biglake_v1.projects.locations.html
 create mode 100644 docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html
 create mode 100644 docs/dyn/cloudbilling_v1.organizations.billingAccounts.html
 create mode 100644 docs/dyn/cloudbilling_v1.organizations.html
 create mode 100644 docs/dyn/places_v1.places.photos.html
 create mode 100644 docs/dyn/workflowexecutions_v1.projects.locations.workflows.executions.stepEntries.html
 create mode 100644 googleapiclient/discovery_cache/documents/biglake.v1.json

diff --git a/docs/dyn/accessapproval_v1.folders.approvalRequests.html b/docs/dyn/accessapproval_v1.folders.approvalRequests.html
index 8ec490186b6..27e2b2380db 100644
--- a/docs/dyn/accessapproval_v1.folders.approvalRequests.html
+++ b/docs/dyn/accessapproval_v1.folders.approvalRequests.html
@@ -135,8 +135,7 @@
+ projects()
+
Returns the projects Resource.
+ +
+ close()
Close httplib2 connections.
+ +Create a BatchHttpRequest object based on the discovery document.
+close()
+ Close httplib2 connections.+
new_batch_http_request()
+ Create a BatchHttpRequest object based on the discovery document. + + Args: + callback: callable, A callback to be called for each response, of the + form callback(id, response, exception). The first parameter is the + request id, and the second is the deserialized response object. The + third is an apiclient.errors.HttpError exception object if an HTTP + error occurred while processing the request, or None if no error + occurred. + + Returns: + A BatchHttpRequest object based on the discovery document. ++
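Editor's note: the docstring above is the client library's standard batching contract. As orientation for reviewers, here is a minimal sketch of driving it against the newly added biglake v1 surface; the service name and resource paths come from this patch, the project/location/catalog IDs are hypothetical, and Application Default Credentials are assumed.

```python
from googleapiclient.discovery import build


def on_response(request_id, response, exception):
    # Matches the callback contract documented above: `exception` is an
    # HttpError if the individual call failed, otherwise None.
    if exception is not None:
        print(f"request {request_id} failed: {exception}")
    else:
        print(f"request {request_id}: {response.get('name')}")


client = build("biglake", "v1")  # uses Application Default Credentials
catalog = "projects/my-project/locations/us/catalogs/my-catalog"  # hypothetical

batch = client.new_batch_http_request(callback=on_response)
databases = client.projects().locations().catalogs().databases()
batch.add(databases.get(name=f"{catalog}/databases/db1"))
batch.add(databases.get(name=f"{catalog}/databases/db2"))
batch.execute()
```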
+ locations()
+
Returns the locations Resource.
+ +
+ close()
Close httplib2 connections.
+close()
+ Close httplib2 connections.+
+ tables()
+
Returns the tables Resource.
+ +
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, databaseId=None, x__xgafv=None)
Creates a new database.
+ +Deletes an existing database specified by the database ID.
+ +Gets the database specified by the resource name.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
List all databases in a specified catalog.
+ +Retrieves the next page of results.
+
+ patch(name, body=None, updateMask=None, x__xgafv=None)
Updates an existing database specified by the database ID.
+close()
+ Close httplib2 connections.+
create(parent, body=None, databaseId=None, x__xgafv=None)
+ Creates a new database. + +Args: + parent: string, Required. The parent resource where this database will be created. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required) + body: object, The request body. + The object takes the form of: + +{ # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. +} + + databaseId: string, Required. The ID to use for the database, which will become the final component of the database's resource name. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. +}+
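Editor's note: a minimal sketch of the call shape documented above, with hypothetical IDs. The `"HIVE"` value for `type` is an assumption, since this fragment only says "The database type."; the output-only fields (`name`, `createTime`, ...) are filled in by the server.

```python
from googleapiclient.discovery import build

client = build("biglake", "v1")
databases = client.projects().locations().catalogs().databases()

database = databases.create(
    parent="projects/my-project/locations/us/catalogs/my-catalog",  # hypothetical
    databaseId="sales",  # becomes the final component of the resource name
    body={
        "type": "HIVE",  # assumed enum value; not enumerated in this fragment
        "hiveOptions": {
            "locationUri": "gs://my-bucket/warehouse/sales",
            "parameters": {"owner": "data-eng"},
        },
    },
).execute()
print(database["name"])  # server-assigned resource name
```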
delete(name, x__xgafv=None)
+ Deletes an existing database specified by the database ID. + +Args: + name: string, Required. The name of the database to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. +}+
get(name, x__xgafv=None)
+ Gets the database specified by the resource name. + +Args: + name: string, Required. The name of the database to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. +}+
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ List all databases in a specified catalog. + +Args: + parent: string, Required. The parent, which owns this collection of databases. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required) + pageSize: integer, The maximum number of databases to return. The service may return fewer than this value. If unspecified, at most 50 databases will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. + pageToken: string, A page token, received from a previous `ListDatabases` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListDatabases` must match the call that provided the page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for the ListDatabases method. + "databases": [ # The databases from the specified catalog. + { # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. + }, + ], + "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. +}+
list_next()
+ Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
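Editor's note: `list()` and `list_next()` above follow the library's usual pagination idiom; a compact sketch with a hypothetical parent:

```python
from googleapiclient.discovery import build

client = build("biglake", "v1")
databases = client.projects().locations().catalogs().databases()

request = databases.list(
    parent="projects/my-project/locations/us/catalogs/my-catalog",  # hypothetical
    pageSize=50,  # server caps this at 1000 per the docstring
)
while request is not None:
    response = request.execute()
    for db in response.get("databases", []):
        print(db["name"])
    # list_next() returns None once the response carries no nextPageToken.
    request = databases.list_next(request, response)
```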
patch(name, body=None, updateMask=None, x__xgafv=None)
+ Updates an existing database specified by the database ID. + +Args: + name: string, Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required) + body: object, The request body. + The object takes the form of: + +{ # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. +} + + updateMask: string, The list of fields to update. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Database is the container of tables. + "createTime": "A String", # Output only. The creation time of the database. + "deleteTime": "A String", # Output only. The deletion time of the database. Only set after the database is deleted. + "expireTime": "A String", # Output only. The time when this database is considered expired. Only set after the database is deleted. + "hiveOptions": { # Options of a Hive database. # Options of a Hive database. + "locationUri": "A String", # Cloud Storage folder URI where the database data is stored, starting with "gs://". + "parameters": { # Stores user supplied Hive database parameters. + "a_key": "A String", + }, + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} + "type": "A String", # The database type. + "updateTime": "A String", # Output only. The last modification time of the database. +}+
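Editor's note: a sketch of a partial update under the `updateMask` semantics described above. The mask path is an assumption based on standard `FieldMask` syntax, and the IDs are hypothetical; omitting the mask would default to all updatable fields per the docstring.

```python
from googleapiclient.discovery import build

client = build("biglake", "v1")
databases = client.projects().locations().catalogs().databases()

name = ("projects/my-project/locations/us/catalogs/my-catalog"
        "/databases/sales")  # hypothetical
updated = databases.patch(
    name=name,
    updateMask="hiveOptions.parameters",  # assumed mask path; restricts the write
    body={"hiveOptions": {"parameters": {"owner": "analytics"}}},
).execute()
```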
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, tableId=None, x__xgafv=None)
Creates a new table.
+ +Deletes an existing table specified by the table ID.
+ +Gets the table specified by the resource name.
+
+ list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)
List all tables in a specified database.
+ +Retrieves the next page of results.
+
+ patch(name, body=None, updateMask=None, x__xgafv=None)
Updates an existing table specified by the table ID.
+
+ rename(name, body=None, x__xgafv=None)
Renames an existing table specified by the table ID.
+close()
+ Close httplib2 connections.+
create(parent, body=None, tableId=None, x__xgafv=None)
+ Creates a new table. + +Args: + parent: string, Required. The parent resource where this table will be created. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required) + body: object, The request body. + The object takes the form of: + +{ # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +} + + tableId: string, Required. The ID to use for the table, which will become the final component of the table's resource name. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. 
+ "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +}+
delete(name, x__xgafv=None)
+ Deletes an existing table specified by the table ID. + +Args: + name: string, Required. The name of the table to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +}+
get(name, x__xgafv=None)
+ Gets the table specified by the resource name. + +Args: + name: string, Required. The name of the table to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +}+
list(parent, pageSize=None, pageToken=None, view=None, x__xgafv=None)
+ List all tables in a specified database. + +Args: + parent: string, Required. The parent, which owns this collection of tables. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id} (required) + pageSize: integer, The maximum number of tables to return. The service may return fewer than this value. If unspecified, at most 50 tables will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. + pageToken: string, A page token, received from a previous `ListTables` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListTables` must match the call that provided the page token. + view: string, The view for the returned tables. + Allowed values + TABLE_VIEW_UNSPECIFIED - Default value. The API will default to the BASIC view. + BASIC - Include only table names. This is the default value. + FULL - Include everything. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for the ListTables method. + "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. + "tables": [ # The tables from the specified database. + { # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. + }, + ], +}+
list_next()
+ Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
patch(name, body=None, updateMask=None, x__xgafv=None)
+ Updates an existing table specified by the table ID. + +Args: + name: string, Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required) + body: object, The request body. + The object takes the form of: + +{ # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +} + + updateMask: string, The list of fields to update. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. 
+ "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +}+
rename(name, body=None, x__xgafv=None)
+ Renames an existing table specified by the table ID. + +Args: + name: string, Required. The table's `name` field is used to identify the table to rename. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} (required) + body: object, The request body. + The object takes the form of: + +{ # Request message for the RenameTable method in MetastoreService + "newName": "A String", # Required. The new `name` for the specified table, must be in the same database. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents a table. + "createTime": "A String", # Output only. The creation time of the table. + "deleteTime": "A String", # Output only. The deletion time of the table. Only set after the table is deleted. + "etag": "A String", # The checksum of a table object computed by the server based on the value of other fields. It may be sent on update requests to ensure the client has an up-to-date value before proceeding. It is only checked for update table operations. + "expireTime": "A String", # Output only. The time when this table is considered expired. Only set after the table is deleted. + "hiveOptions": { # Options of a Hive table. # Options of a Hive table. + "parameters": { # Stores user supplied Hive table parameters. + "a_key": "A String", + }, + "storageDescriptor": { # Stores physical storage information of the data. # Stores physical storage information of the data. + "inputFormat": "A String", # The fully qualified Java class name of the input format. + "locationUri": "A String", # Cloud Storage folder URI where the table data is stored, starting with "gs://". + "outputFormat": "A String", # The fully qualified Java class name of the output format. + "serdeInfo": { # Serializer and deserializer information. # Serializer and deserializer information. + "serializationLib": "A String", # The fully qualified Java class name of the serialization library. + }, + }, + "tableType": "A String", # Hive table type. For example, MANAGED_TABLE, EXTERNAL_TABLE. + }, + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id}/databases/{database_id}/tables/{table_id} + "type": "A String", # The table type. + "updateTime": "A String", # Output only. The last modification time of the table. +}+
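And a matching sketch for rename (same assumed client and placeholder IDs; per the request message above, the body carries only `newName`, which must stay within the same database):

    from googleapiclient.discovery import build

    service = build("biglake", "v1")
    tables = service.projects().locations().catalogs().databases().tables()

    old_name = ("projects/my-project/locations/us/catalogs/my_catalog"
                "/databases/my_db/tables/my_table")
    new_name = old_name.rsplit("/", 1)[0] + "/my_table_v2"

    # The response is the renamed Table resource.
    renamed = tables.rename(name=old_name, body={"newName": new_name}).execute()
    print(renamed["name"])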
+ databases()
+
Returns the databases Resource.
+ +
+ close()
Close httplib2 connections.
+
+ create(parent, body=None, catalogId=None, x__xgafv=None)
Creates a new catalog.
+ +Deletes an existing catalog specified by the catalog ID.
+ +Gets the catalog specified by the resource name.
+
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)
Lists all catalogs in a specified project.
+ +Retrieves the next page of results.
+close()
+ Close httplib2 connections.+
create(parent, body=None, catalogId=None, x__xgafv=None)
+ Creates a new catalog. + +Args: + parent: string, Required. The parent resource where this catalog will be created. Format: projects/{project_id_or_number}/locations/{location_id} (required) + body: object, The request body. + The object takes the form of: + +{ # Catalog is the container of databases. + "createTime": "A String", # Output only. The creation time of the catalog. + "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted. + "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted. + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} + "updateTime": "A String", # Output only. The last modification time of the catalog. +} + + catalogId: string, Required. The ID to use for the catalog, which will become the final component of the catalog's resource name. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Catalog is the container of databases. + "createTime": "A String", # Output only. The creation time of the catalog. + "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted. + "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted. + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} + "updateTime": "A String", # Output only. The last modification time of the catalog. +}+
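A minimal create sketch (placeholder project and catalog IDs; the body can be empty because every Catalog field is output only):

    from googleapiclient.discovery import build

    service = build("biglake", "v1")

    catalog = service.projects().locations().catalogs().create(
        parent="projects/my-project/locations/us",
        catalogId="my_catalog",  # becomes the final component of the resource name
        body={},
    ).execute()
    print(catalog["name"], catalog["createTime"])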
delete(name, x__xgafv=None)
+ Deletes an existing catalog specified by the catalog ID. + +Args: + name: string, Required. The name of the catalog to delete. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Catalog is the container of databases. + "createTime": "A String", # Output only. The creation time of the catalog. + "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted. + "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted. + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} + "updateTime": "A String", # Output only. The last modification time of the catalog. +}+
get(name, x__xgafv=None)
+ Gets the catalog specified by the resource name. + +Args: + name: string, Required. The name of the catalog to retrieve. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Catalog is the container of databases. + "createTime": "A String", # Output only. The creation time of the catalog. + "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted. + "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted. + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} + "updateTime": "A String", # Output only. The last modification time of the catalog. +}+
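Both get and delete return the Catalog resource, so a caller can inspect deleteTime after removal; a short sketch with placeholder IDs:

    from googleapiclient.discovery import build

    service = build("biglake", "v1")
    catalogs = service.projects().locations().catalogs()
    name = "projects/my-project/locations/us/catalogs/my_catalog"

    fetched = catalogs.get(name=name).execute()
    print(fetched["createTime"])

    # delete also returns the Catalog, now with deleteTime populated.
    deleted = catalogs.delete(name=name).execute()
    print(deleted.get("deleteTime"))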
list(parent, pageSize=None, pageToken=None, x__xgafv=None)
+ Lists all catalogs in a specified project. + +Args: + parent: string, Required. The parent, which owns this collection of catalogs. Format: projects/{project_id_or_number}/locations/{location_id} (required) + pageSize: integer, The maximum number of catalogs to return. The service may return fewer than this value. If unspecified, at most 50 catalogs will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. + pageToken: string, A page token, received from a previous `ListCatalogs` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListCatalogs` must match the call that provided the page token. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for the ListCatalogs method. + "catalogs": [ # The catalogs from the specified project. + { # Catalog is the container of databases. + "createTime": "A String", # Output only. The creation time of the catalog. + "deleteTime": "A String", # Output only. The deletion time of the catalog. Only set after the catalog is deleted. + "expireTime": "A String", # Output only. The time when this catalog is considered expired. Only set after the catalog is deleted. + "name": "A String", # Output only. The resource name. Format: projects/{project_id_or_number}/locations/{location_id}/catalogs/{catalog_id} + "updateTime": "A String", # Output only. The last modification time of the catalog. + }, + ], + "nextPageToken": "A String", # A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages. +}+
list_next()
+ Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++
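A paging sketch that chains list and list_next until list_next returns None (placeholder parent; the 50-per-page default and 1000 cap come from the pageSize documentation above):

    from googleapiclient.discovery import build

    service = build("biglake", "v1")
    catalogs = service.projects().locations().catalogs()

    request = catalogs.list(parent="projects/my-project/locations/us", pageSize=50)
    while request is not None:
        response = request.execute()
        for catalog in response.get("catalogs", []):
            print(catalog["name"])
        # Returns None once the response carries no nextPageToken.
        request = catalogs.list_next(previous_request=request,
                                     previous_response=response)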
+ catalogs()
+
Returns the catalogs Resource.
+ +
+ close()
Close httplib2 connections.
+close()
+ Close httplib2 connections.+
Returns the projects Resource.
+
+ subAccounts()
+
Returns the subAccounts Resource.
+Close httplib2 connections.
- create(body=None, x__xgafv=None)
create(body=None, parent=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
@@ -92,11 +97,14 @@getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)
Gets the access control policy for a billing account. The caller must have the `billing.accounts.getIamPolicy` permission on the account, which is often given to billing account [viewers](https://cloud.google.com/billing/docs/how-to/billing-access).
- list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)
list(filter=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
Retrieves the next page of results.
+
+ move(name, body=None, x__xgafv=None)
Changes which parent organization a billing account belongs to.
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account.
@@ -113,7 +121,7 @@create(body=None, x__xgafv=None)
+ create(body=None, parent=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts. Args: @@ -127,6 +135,7 @@Method Details
"open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. } + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} eg organizations/12345678 - billingAccounts/{billing_account_id} eg `billingAccounts/012345-567890-ABCDEF` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -214,13 +223,14 @@Method Details
list(filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+ list(filter=None, pageSize=None, pageToken=None, parent=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access). Args: filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported. pageSize: integer, Requested page size. The maximum page size is 100; this is also the default. pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned. + parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} e.g. organizations/12345678 - billingAccounts/{billing_account_id} e.g. `billingAccounts/012345-567890-ABCDEF` x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -256,6 +266,35 @@Method Details
move(name, body=None, x__xgafv=None)
+ Changes which parent organization a billing account belongs to. + +Args: + name: string, Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account. (required) + body: object, The request body. + The object takes the form of: + +{ # Request message for `MoveBillingAccount` RPC. + "destinationParent": "A String", # Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+
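A hedged sketch of the move call (placeholder account and organization IDs; as noted above, the account must not be a subaccount):

    from googleapiclient.discovery import build

    service = build("cloudbilling", "v1")

    # Reparents the billing account under the destination organization.
    account = service.billingAccounts().move(
        name="billingAccounts/012345-567890-ABCDEF",
        body={"destinationParent": "organizations/12345678"},
    ).execute()
    print(account["displayName"])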
patch(name, body=None, updateMask=None, x__xgafv=None)
Updates a billing account's fields. Currently the only field that can be edited is `display_name`. The current authenticated user must have the `billing.accounts.update` IAM permission, which is typically given to the [administrator](https://cloud.google.com/billing/docs/how-to/billing-access) of the billing account. diff --git a/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html new file mode 100644 index 00000000000..b71c2d190ee --- /dev/null +++ b/docs/dyn/cloudbilling_v1.billingAccounts.subAccounts.html @@ -0,0 +1,171 @@ + + + +@@ -208,7 +208,7 @@Cloud Billing API . billingAccounts . subAccounts
+Instance Methods
++
+close()
Close httplib2 connections.
++
+create(parent, body=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
++
+list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
+ +Retrieves the next page of results.
+Method Details
+++ +close()
+Close httplib2 connections.+++ +create(parent, body=None, x__xgafv=None)
+This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts. + +Args: + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} e.g. organizations/12345678 - billingAccounts/{billing_account_id} e.g. `billingAccounts/012345-567890-ABCDEF` (required) + body: object, The request body. + The object takes the form of: + +{ # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+++ +list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access). + +Args: + parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} e.g. organizations/12345678 - billingAccounts/{billing_account_id} e.g. `billingAccounts/012345-567890-ABCDEF` (required) + filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported. + pageSize: integer, Requested page size. The maximum page size is 100; this is also the default. + pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for `ListBillingAccounts`. + "billingAccounts": [ # A list of billing accounts. + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. + }, + ], + "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. +}+++ +list_next()
+Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++Instance Methods
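A short sketch of the new subAccounts surface (placeholder parent; the caller needs `billing.accounts.update` on the parent account, and per the body schema above displayName is the only writable field):

    from googleapiclient.discovery import build

    service = build("cloudbilling", "v1")
    sub_accounts = service.billingAccounts().subAccounts()
    parent = "billingAccounts/012345-567890-ABCDEF"

    created = sub_accounts.create(
        parent=parent,
        body={"displayName": "Team A subaccount"},
    ).execute()
    print(created["name"])

    # Lists only subaccounts under the given parent.
    listing = sub_accounts.list(parent=parent).execute()
    for account in listing.get("billingAccounts", []):
        print(account["name"], account["masterBillingAccount"])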
Returns the billingAccounts Resource.
++
+organizations()
+Returns the organizations Resource.
+ diff --git a/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html new file mode 100644 index 00000000000..0a77281241d --- /dev/null +++ b/docs/dyn/cloudbilling_v1.organizations.billingAccounts.html @@ -0,0 +1,197 @@ + + + +Cloud Billing API . organizations . billingAccounts
+Instance Methods
++
+close()
Close httplib2 connections.
++
+create(parent, body=None, x__xgafv=None)
This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts.
++
+list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access).
+ +Retrieves the next page of results.
++
+move(destinationParent, name, x__xgafv=None)
Changes which parent organization a billing account belongs to.
+Method Details
+++ +close()
+Close httplib2 connections.+++ +create(parent, body=None, x__xgafv=None)
+This method creates [billing subaccounts](https://cloud.google.com/billing/docs/concepts#subaccounts). Google Cloud resellers should use the Channel Services APIs, [accounts.customers.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers/create) and [accounts.customers.entitlements.create](https://cloud.google.com/channel/docs/reference/rest/v1/accounts.customers.entitlements/create). When creating a subaccount, the current authenticated user must have the `billing.accounts.update` IAM permission on the parent account, which is typically given to billing account [administrators](https://cloud.google.com/billing/docs/how-to/billing-access). This method will return an error if the parent account has not been provisioned for subaccounts. + +Args: + parent: string, Optional. The parent to create a billing account from. Format: - organizations/{organization_id} e.g. organizations/12345678 - billingAccounts/{billing_account_id} e.g. `billingAccounts/012345-567890-ABCDEF` (required) + body: object, The request body. + The object takes the form of: + +{ # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +} + + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+++ +list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists the billing accounts that the current authenticated user has permission to [view](https://cloud.google.com/billing/docs/how-to/billing-access). + +Args: + parent: string, Optional. The parent resource to list billing accounts from. Format: - organizations/{organization_id} e.g. organizations/12345678 - billingAccounts/{billing_account_id} e.g. `billingAccounts/012345-567890-ABCDEF` (required) + filter: string, Options for how to filter the returned billing accounts. This only supports filtering for [subaccounts](https://cloud.google.com/billing/docs/concepts) under a single provided parent billing account. (e.g. "master_billing_account=billingAccounts/012345-678901-ABCDEF"). Boolean algebra and other fields are not currently supported. + pageSize: integer, Requested page size. The maximum page size is 100; this is also the default. + pageToken: string, A token identifying a page of results to return. This should be a `next_page_token` value returned from a previous `ListBillingAccounts` call. If unspecified, the first page of results is returned. + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Response message for `ListBillingAccounts`. + "billingAccounts": [ # A list of billing accounts. + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. + }, + ], + "nextPageToken": "A String", # A token to retrieve the next page of results. To retrieve the next page, call `ListBillingAccounts` again with the `page_token` field set to this value. This field is empty if there are no more results to retrieve. +}+++ +list_next()
+Retrieves the next page of results. + + Args: + previous_request: The request for the previous page. (required) + previous_response: The response from the request for the previous page. (required) + + Returns: + A request object that you can call 'execute()' on to request the next + page. Returns None if there are no more items in the collection. ++++ + \ No newline at end of file diff --git a/docs/dyn/cloudbilling_v1.organizations.html b/docs/dyn/cloudbilling_v1.organizations.html new file mode 100644 index 00000000000..18fdabc9c7c --- /dev/null +++ b/docs/dyn/cloudbilling_v1.organizations.html @@ -0,0 +1,91 @@ + + + +move(destinationParent, name, x__xgafv=None)
+Changes which parent organization a billing account belongs to. + +Args: + destinationParent: string, Required. The resource name of the Organization to reparent the billing account under. Must be of the form `organizations/{organization_id}`. (required) + name: string, Required. The resource name of the billing account to move. Must be of the form `billingAccounts/{billing_account_id}`. The specified billing account cannot be a subaccount, since a subaccount always belongs to the same organization as its parent account. (required) + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # A billing account in the [Google Cloud Console](https://console.cloud.google.com/). You can assign a billing account to one or more projects. + "displayName": "A String", # The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console. + "masterBillingAccount": "A String", # If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty. + "name": "A String", # Output only. The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`. + "open": True or False, # Output only. True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services. +}+Cloud Billing API . organizations
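Unlike billingAccounts.move, this organizations-level variant flattens the request into two path parameters instead of a body; a sketch with placeholder IDs:

    from googleapiclient.discovery import build

    service = build("cloudbilling", "v1")

    account = service.organizations().billingAccounts().move(
        destinationParent="organizations/12345678",
        name="billingAccounts/012345-567890-ABCDEF",
    ).execute()
    print(account["name"])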
+Instance Methods
+ +Returns the billingAccounts Resource.
+ ++
+close()
Close httplib2 connections.
+Method Details
+++ + \ No newline at end of file diff --git a/docs/dyn/cloudtasks_v2.projects.locations.html b/docs/dyn/cloudtasks_v2.projects.locations.html index bba06f9b352..fd11c8e9af7 100644 --- a/docs/dyn/cloudtasks_v2.projects.locations.html +++ b/docs/dyn/cloudtasks_v2.projects.locations.html @@ -144,7 +144,7 @@close()
+Close httplib2 connections.+Method Details
Returns: An object of the form: - { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. + { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` }
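These hunks only retitle the CmekConfig resource's description; a hedged sketch of reading and updating it follows (it assumes the projects.locations surface exposes the config via getCmekConfig/updateCmekConfig, and the project, location, and key names are placeholders):

    from googleapiclient.discovery import build

    service = build("cloudtasks", "v2")
    locations = service.projects().locations()
    name = "projects/my-project/locations/us-central1/cmekConfig"

    config = locations.getCmekConfig(name=name).execute()
    print(config.get("kmsKey", "<CMEK off>"))

    # Point the config at a KMS key; per the field docs, a blank kmsKey
    # would turn CMEK encryption off for Queues & Tasks in the region.
    locations.updateCmekConfig(
        name=name,
        body={"kmsKey": "projects/my-project/locations/us-central1"
                        "/keyRings/my-ring/cryptoKeys/my-key"},
    ).execute()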
Creates a queue. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
-Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
+Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns a 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue was not successfully recreated. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
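Given the tombstone window described above, a defensive pattern is to follow a re-create attempt with GetQueue; a sketch (assuming the v2 client via google-api-python-client; names are placeholders):

    from googleapiclient.discovery import build
    from googleapiclient.errors import HttpError

    service = build("cloudtasks", "v2")
    queues = service.projects().locations().queues()
    name = "projects/my-project/locations/us-central1/queues/my-queue"

    queues.delete(name=name).execute()

    # ...later, after attempting to recreate a queue with the same name:
    try:
        queues.get(name=name).execute()  # a 200 means the recreate took effect
        print("queue exists")
    except HttpError as err:
        if err.resp.status == 404:
            print("queue was not successfully recreated")
        else:
            raise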
Gets a queue.
@@ -198,7 +198,7 @@delete(name, x__xgafv=None)
- Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method. +diff --git a/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html index f35de98fdd2..26ccc9ddb32 100644 --- a/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html +++ b/docs/dyn/cloudtasks_v2beta2.projects.locations.queues.tasks.html @@ -79,7 +79,7 @@Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method. Args: name: string, Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` (required) diff --git a/docs/dyn/cloudtasks_v2beta2.projects.locations.html b/docs/dyn/cloudtasks_v2beta2.projects.locations.html index 4835bd73b56..93dd5dd6bd0 100644 --- a/docs/dyn/cloudtasks_v2beta2.projects.locations.html +++ b/docs/dyn/cloudtasks_v2beta2.projects.locations.html @@ -144,7 +144,7 @@@@ -208,7 +208,7 @@Method Details
Returns: An object of the form: - { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. + { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` }Method Details
body: object, The request body. The object takes the form of: -{ # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. +{ # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` } @@ -222,7 +222,7 @@Method Details
Returns: An object of the form: - { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. + { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` }
Acknowledges a pull task. The worker, that is, the entity that leased this task must call this method to indicate that the work associated with the task has finished. The worker must acknowledge a task within the lease_duration or the lease will expire and the task will become available to be leased again. After the task is acknowledged, it will not be returned by a later LeaseTasks, GetTask, or ListTasks.
buffer(queue, taskId, body=None, x__xgafv=None)
Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).
+Creates and buffers a new task without the need to explicitly define a Task message. The queue must have an HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.
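With the experiment gate removed, buffer can be called directly; a sketch (assuming the v2beta2 client; the queue must already exist and have an HTTP target, and all IDs are placeholders):

    from googleapiclient.discovery import build

    service = build("cloudtasks", "v2beta2")
    tasks = service.projects().locations().queues().tasks()

    # Buffers a task with a caller-chosen ID; the body may stay empty.
    response = tasks.buffer(
        queue="projects/my-project/locations/us-central1/queues/my-queue",
        taskId="my-task-id",
        body={},
    ).execute()
    # The response wraps the created Task.
    print(response.get("task", {}).get("name"))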
cancelLease(name, body=None, x__xgafv=None)
Cancel a pull task's lease. The worker can use this method to cancel a task's lease by setting its schedule_time to now. This will make the task available to be leased to the next caller of LeaseTasks.
@@ -138,7 +138,7 @@buffer(queue, taskId, body=None, x__xgafv=None)
- Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8). +diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html index 450073374fc..8d4289fdc6a 100644 --- a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html +++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.html @@ -87,7 +87,7 @@Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Args: queue: string, Required. The parent queue name. For example: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The queue must already exist. (required) diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.html index fe3ecf906ff..ff79a5b942d 100644 --- a/docs/dyn/cloudtasks_v2beta3.projects.locations.html +++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.html @@ -144,7 +144,7 @@@@ -208,7 +208,7 @@Method Details
Returns: An object of the form: - { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. + { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` }Method Details
body: object, The request body. The object takes the form of: -{ # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. +{ # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` } @@ -222,7 +222,7 @@Method Details
Returns: An object of the form: - { # CMEK, or Customer Managed Encryption Keys, enables GCP products to put control over encryption and key management in their customer’s hands. + { # Describes the customer-managed encryption key (CMEK) configuration associated with a project and location. "kmsKey": "A String", # Resource name of the Cloud KMS key, of the form `projects/PROJECT_ID/locations/LOCATION_ID/keyRings/KEY_RING_ID/cryptoKeys/KEY_ID`, that will be used to encrypt the Queues & Tasks in the region. Setting this as blank will turn off CMEK encryption. "name": "A String", # Output only. The config resource name which includes the project and location and must end in 'cmekConfig', in the format projects/PROJECT_ID/locations/LOCATION_ID/cmekConfig` }
Creates a queue. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
-Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
+Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns a 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue was not successfully recreated. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.
get(name, readMask=None, x__xgafv=None)
Gets a queue.
@@ -284,7 +284,7 @@delete(name, x__xgafv=None)
- Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method. +Deletes a queue. This command will delete the queue even if it has tasks in it. Note : If you delete a queue, you may be prevented from creating a new queue with the same name as the deleted queue for a tombstone window of up to 3 days. During this window, the CreateQueue operation may appear to recreate the queue, but this can be misleading. If you attempt to create a queue with the same name as one that is in the tombstone window, run GetQueue to confirm that the queue creation was successful. If GetQueue returns 200 response code, your queue was successfully created with the name of the previously deleted queue. Otherwise, your queue did not successfully recreate. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method. Args: name: string, Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` (required) diff --git a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html index 7eaa3977830..f465c41b0bd 100644 --- a/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html +++ b/docs/dyn/cloudtasks_v2beta3.projects.locations.queues.tasks.html @@ -76,7 +76,7 @@Cloud Tasks API .
buffer(queue, taskId, body=None, x__xgafv=None)
-Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8).
+Creates and buffers a new task without the need to explicitly define a Task message. The queue must have an HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer.
Close httplib2 connections.
@@ -101,7 +101,7 @@Instance Methods
Method Details
+buffer(queue, taskId, body=None, x__xgafv=None)
-Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Note: This feature is in its experimental stage. You must request access to the API through the [Cloud Tasks BufferTask Experiment Signup form](https://forms.gle/X8Zr5hiXH5tTGFqh8). +Creates and buffers a new task without the need to explicitly define a Task message. The queue must have HTTP target. To create the task with a custom ID, use the following format and set TASK_ID to your desired ID: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID:buffer To create the task with an automatically generated ID, use the following format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks:buffer. Args: queue: string, Required. The parent queue name. For example: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The queue must already exist. (required) diff --git a/docs/dyn/compute_alpha.backendServices.html b/docs/dyn/compute_alpha.backendServices.html index 893ad98a1d7..88e12120e2f 100644 --- a/docs/dyn/compute_alpha.backendServices.html +++ b/docs/dyn/compute_alpha.backendServices.html @@ -418,7 +418,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1162,7 +1162,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1748,7 +1748,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2316,7 +2316,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2782,7 +2782,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -3264,7 +3264,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -4324,7 +4324,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration.

diff --git a/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html b/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html
index 7fa514bb510..31c566477c9 100644
--- a/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html
+++ b/docs/dyn/compute_alpha.globalNetworkEndpointGroups.html
@@ -127,7 +127,6 @@ Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -402,7 +401,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -920,7 +918,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/compute_alpha.instances.html b/docs/dyn/compute_alpha.instances.html index 586ad69f55c..a76ab2114c2 100644 --- a/docs/dyn/compute_alpha.instances.html +++ b/docs/dyn/compute_alpha.instances.html @@ -101,6 +101,9 @@Instance Methods
Deletes an access config from an instance's network interface.
++
+deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)
Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now.
detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)
Detaches a disk from an instance.
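[Editor's note: the new deleteNetworkInterface method listed above takes exactly the parameters shown in its signature; a minimal sketch follows. All resource names are placeholders, and per the description only VLAN interface deletion is supported for now.]

```python
from googleapiclient.discovery import build

compute = build("compute", "alpha")

# Delete a single (VLAN) network interface from an active instance.
operation = compute.instances().deleteNetworkInterface(
    project="my-project",
    zone="us-central1-a",
    instance="my-instance",
    networkInterfaceName="vlan-nic0",  # placeholder VLAN interface name
).execute()
print(operation.get("name"), operation.get("status"))
```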
@@ -821,6 +824,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -1956,6 +1962,133 @@Method Details
}++deleteNetworkInterface(project, zone, instance, networkInterfaceName, requestId=None, x__xgafv=None)
+Deletes one network interface from an active instance. InstancesDeleteNetworkInterfaceRequest indicates: - instance from which to delete, using project+zone+resource_id fields; - network interface to be deleted, using network_interface_name field; Only VLAN interface deletion is supported for now. + +Args: + project: string, Project ID for this request. (required) + zone: string, The name of the zone for this request. (required) + instance: string, The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer. (required) + networkInterfaceName: string, The name of the network interface to be deleted from the instance. Only VLAN network interface deletion is supported. (required) + requestId: string, An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). + x__xgafv: string, V1 error format. + Allowed values + 1 - v1 error format + 2 - v2 error format + +Returns: + An object of the form: + + { # Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources. + "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request. Not present otherwise. + "creationTimestamp": "A String", # [Deprecated] This field is deprecated. + "description": "A String", # [Output Only] A textual description of the operation, which is set when the operation is created. + "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339 text format. + "error": { # [Output Only] If errors are generated during processing of the operation, this field will be populated. + "errors": [ # [Output Only] The array of errors encountered while processing this operation. + { + "code": "A String", # [Output Only] The error type identifier for this error. + "errorDetails": [ # [Output Only] An optional list of messages that contain the error details. There is a set of defined message types to use for providing details.The syntax depends on the error code. For example, QuotaExceededInfo will have details when the error code is QUOTA_EXCEEDED. + { + "errorInfo": { # Describes the cause of the error with structured details. 
Example of an error when contacting the "pubsub.googleapis.com" API when it is not enabled: { "reason": "API_DISABLED" "domain": "googleapis.com" "metadata": { "resource": "projects/123", "service": "pubsub.googleapis.com" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { "reason": "STOCKOUT" "domain": "spanner.googleapis.com", "metadata": { "availableRegions": "us-central1,us-east2" } } + "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: "pubsub.googleapis.com". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is "googleapis.com". + "metadatas": { # Additional structured details about this error. Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than {"instanceLimit": "100/request"}, should be returned as, {"instanceLimitPerRequest": "100"}, if the client exceeds the number of instances that can be created in a single (batch) request. + "a_key": "A String", + }, + "reason": "A String", # The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE. + }, + "help": { # Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit. + "links": [ # URL(s) pointing to additional information on handling the current error. + { # Describes a URL link. + "description": "A String", # Describes what the link offers. + "url": "A String", # The URL of the link. + }, + ], + }, + "localizedMessage": { # Provides a localized error message that is safe to return to the user which can be attached to an RPC error. + "locale": "A String", # The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: "en-US", "fr-CH", "es-MX" + "message": "A String", # The localized error message in the above locale. + }, + "quotaInfo": { # Additional details for quota exceeded error for resource quota. + "dimensions": { # The map holding related quota dimensions. + "a_key": "A String", + }, + "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota type or metric. + "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type or metric. + "limitName": "A String", # The name of the quota limit. + "metricName": "A String", # The Compute Engine quota metric name. + "rolloutStatus": "A String", # Rollout status of the future quota limit. + }, + }, + ], + "location": "A String", # [Output Only] Indicates the field in the request that caused the error. This property is optional. + "message": "A String", # [Output Only] An optional, human-readable error message. 
+ }, + ], + }, + "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error message that was returned, such as `NOT FOUND`. + "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error status code that was returned. For example, a `404` means the resource was not found. + "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is defined by the server. + "insertTime": "A String", # [Output Only] The time that this operation was requested. This value is in RFC3339 text format. + "instancesBulkInsertOperationMetadata": { + "perLocationStatus": { # Status information per location (location name is key). Example key: zones/us-central1-a + "a_key": { + "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far. + "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback. + "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an error. + "status": "A String", # [Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back. + "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created. + }, + }, + }, + "kind": "compute#operation", # [Output Only] Type of the resource. Always `compute#operation` for Operation resources. + "name": "A String", # [Output Only] Name of the operation. + "operationGroupId": "A String", # [Output Only] An ID that represents a group of operations, such as when a group of operations results from a `bulkInsert` API request. + "operationType": "A String", # [Output Only] The type of operation, such as `insert`, `update`, or `delete`, and so on. + "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess when the operation will be complete. This number should monotonically increase as the operation progresses. + "region": "A String", # [Output Only] The URL of the region where the operation resides. Only applicable when performing regional operations. + "selfLink": "A String", # [Output Only] Server-defined URL for the resource. + "selfLinkWithId": "A String", # [Output Only] Server-defined URL for this resource with the resource id. + "setCommonInstanceMetadataOperationMetadata": { # [Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state. + "clientOperationId": "A String", # [Output Only] The client operation id. + "perLocationOperations": { # [Output Only] Status information per location (location name is key). Example key: zones/us-central1-a + "a_key": { + "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # [Output Only] If state is `ABANDONED` or `FAILED`, this field is populated. + "code": 42, # The status code, which should be an enum value of google.rpc.Code. + "details": [ # A list of messages that carry the error details. 
There is a common set of message types for APIs to use. + { + "a_key": "", # Properties of the object. Contains field @type with type URL. + }, + ], + "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client. + }, + "state": "A String", # [Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`. + }, + }, + }, + "startTime": "A String", # [Output Only] The time that this operation was started by the server. This value is in RFC3339 text format. + "status": "A String", # [Output Only] The status of the operation, which can be one of the following: `PENDING`, `RUNNING`, or `DONE`. + "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the operation. + "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation of the target resource. + "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For operations related to creating a snapshot, this points to the persistent disk that the snapshot was created from. + "user": "A String", # [Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`. + "warnings": [ # [Output Only] If warning messages are generated during processing of the operation, this field will be populated. + { + "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response. + "data": [ # [Output Only] Metadata about this warning in key: value format. For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" } + { + "key": "A String", # [Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding). + "value": "A String", # [Output Only] A warning data value corresponding to the key. + }, + ], + "message": "A String", # [Output Only] A human-readable description of the warning code. + }, + ], + "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only applicable when performing per-zone operations. +}+@@ -3763,6 +3781,9 @@detachDisk(project, zone, instance, deviceName, requestId=None, x__xgafv=None)
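[Editor's note: the method returns the standard zonal Operation documented above; a hedged polling sketch using zoneOperations.wait, with placeholder names throughout, that also surfaces the structured errors[] array from the Operation resource.]

```python
from googleapiclient.discovery import build

compute = build("compute", "alpha")

# Block (up to the server-side deadline) until the operation finishes.
op = compute.zoneOperations().wait(
    project="my-project",
    zone="us-central1-a",
    operation="operation-123",  # placeholder: the "name" from the response above
).execute()

if "error" in op:
    # Walk the errors[] array documented in the Operation resource above.
    for err in op["error"].get("errors", []):
        print(err.get("code"), err.get("location"), err.get("message"))
else:
    print("done:", op.get("status"))
```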
Detaches a disk from an instance. @@ -2373,6 +2506,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -3387,6 +3523,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -3923,6 +4062,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. @@ -7913,6 +8055,9 @@Method Details
"A String", ], "resourceStatus": { # Contains output only fields. Use this sub-message for actual values set on Instance attributes as compared to the value requested by the user (intent) in their instance CRUD calls. # [Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field. + "lastInstanceTerminationDetails": { # [Output Only] Contains last termination details why the instance was terminated. + "terminationReason": "A String", # Reason for termination + }, "physicalHost": "A String", # [Output Only] An opaque ID of the host on which the VM is running. "scheduling": { "availabilityDomain": 42, # Specifies the availability domain (AD), which this instance should be scheduled on. The AD belongs to the spread GroupPlacementPolicy resource policy that has been assigned to the instance. Specify a value between 1-max count of availability domains in your GroupPlacementPolicy. See go/placement-policy-extension for more details. diff --git a/docs/dyn/compute_alpha.networkEndpointGroups.html b/docs/dyn/compute_alpha.networkEndpointGroups.html index 25a04229089..452f84d77cf 100644 --- a/docs/dyn/compute_alpha.networkEndpointGroups.html +++ b/docs/dyn/compute_alpha.networkEndpointGroups.html @@ -262,7 +262,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -539,7 +538,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -1032,7 +1030,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], @@ -1084,7 +1081,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/compute_alpha.regionBackendServices.html b/docs/dyn/compute_alpha.regionBackendServices.html index 04515df5d88..7830bd20995 100644 --- a/docs/dyn/compute_alpha.regionBackendServices.html +++ b/docs/dyn/compute_alpha.regionBackendServices.html @@ -391,7 +391,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -980,7 +980,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -1549,7 +1549,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2016,7 +2016,7 @@Method Details
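To illustrate the reworded ipAddressSelectionPolicy field in the hunks above, a minimal sketch using the Python client; "my-project" and "my-backend-service" are placeholder names, and application default credentials are assumed:

from googleapiclient import discovery

# Build a Compute Engine client (application default credentials assumed).
compute = discovery.build("compute", "v1")

# Patch an existing backend service to prefer healthy IPv6 endpoints.
operation = compute.backendServices().patch(
    project="my-project",
    backendService="my-backend-service",
    body={"ipAddressSelectionPolicy": "PREFER_IPV6"},
).execute()
print(operation["name"])  # Name of the Operation resource tracking the patch.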
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -2499,7 +2499,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. @@ -3431,7 +3431,7 @@Method Details
"oauth2ClientSecretSha256": "A String", # [Output Only] SHA256 hash value for the field oauth2_client_secret above. }, "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. - "ipAddressSelectionPolicy": "A String", # Specifies preference of traffic to the backend (from the proxy and from the client for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv4 health-checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoints IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the Backend Service (Instance Group, Managed Instance Group, Network Endpoint Group) regardless of traffic from the client to the proxy. Only IPv6 health-checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). + "ipAddressSelectionPolicy": "A String", # Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). "kind": "compute#backendService", # [Output Only] Type of resource. Always compute#backendService for backend services. "loadBalancingScheme": "A String", # Specifies the load balancer type. A backend service created for one type of load balancer cannot be used with another. For more information, refer to Choosing a load balancer. "localityLbPolicies": [ # A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. localityLbPolicies overrides any value set in the localityLbPolicy field. 
For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration. diff --git a/docs/dyn/compute_alpha.regionCommitments.html b/docs/dyn/compute_alpha.regionCommitments.html index 0d7a28a0351..c3630aa3808 100644 --- a/docs/dyn/compute_alpha.regionCommitments.html +++ b/docs/dyn/compute_alpha.regionCommitments.html @@ -143,6 +143,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -607,6 +610,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -764,6 +770,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -1051,6 +1060,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. @@ -1270,6 +1282,9 @@Method Details
"creationTimestamp": "A String", # [Output Only] Creation timestamp in RFC3339 text format. "description": "A String", # An optional description of this resource. Provide this property when you create the resource. "endTimestamp": "A String", # [Output Only] Commitment end time in RFC3339 text format. + "existingReservations": [ # Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation + "A String", + ], "id": "A String", # [Output Only] The unique identifier for the resource. This identifier is defined by the server. "kind": "compute#commitment", # [Output Only] Type of the resource. Always compute#commitment for commitments. "licenseResource": { # Commitment for a particular license resource. # The license specification required as part of a license commitment. diff --git a/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html b/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html index 0a427d9ef5b..c36081ce1b6 100644 --- a/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html +++ b/docs/dyn/compute_alpha.regionNetworkEndpointGroups.html @@ -128,7 +128,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -405,7 +404,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, ], } @@ -927,7 +925,6 @@Method Details
"ipAddress": "A String", # Optional IPv4 address of network endpoint. The IP address must belong to a VM in Compute Engine (either the primary IP or as part of an aliased IP range). If the IP address is not specified, then the primary IP address for the VM instance in the network that the network endpoint group belongs to will be used. "ipv6Address": "A String", # Optional IPv6 address of network endpoint. "port": 42, # Optional port number of network endpoint. If not specified, the defaultPort for the network endpoint group will be used. - "zone": "A String", # The name of the zone where the instance hosting the network endpoint is located (valid only for regional GCE_VM_IP_PORT NEGs). It should comply with RFC1035. The zone must belong to the region of the Network Endpoint Group. }, }, ], diff --git a/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html b/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html index 59c9f2036ce..4f29dff02b0 100644 --- a/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html +++ b/docs/dyn/connectors_v1.projects.locations.connections.eventSubscriptions.html @@ -129,6 +129,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -234,6 +238,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -281,6 +289,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. @@ -337,6 +349,10 @@Method Details
"type": "A String", # type of the destination }, "eventTypeId": "A String", # Optional. Event type id of the event of current EventSubscription. + "jms": { # JMS message denotes the source of the event # Optional. JMS is the source for the event listener. + "name": "A String", # Optional. Name of the JMS source. i.e. queueName or topicName + "type": "A String", # Optional. Type of the JMS Source. i.e. Queue or Topic + }, "name": "A String", # Required. Resource name of the EventSubscription. Format: projects/{project}/locations/{location}/connections/{connection}/eventSubscriptions/{event_subscription} "status": { # EventSubscription Status denotes the status of the EventSubscription resource. # Optional. Status indicates the status of the event subscription resource "description": "A String", # Output only. Description of the state. diff --git a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html index 5caac806cf1..b1bc66442f4 100644 --- a/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html +++ b/docs/dyn/connectors_v1.projects.locations.providers.connectors.versions.html @@ -443,6 +443,7 @@Method Details
"valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. }, "enrichmentSupported": True or False, # Enrichment Supported. + "eventListenerType": "A String", # The type of the event listener for a specific connector. "isEventingSupported": True or False, # Is Eventing Supported. "registrationDestinationConfig": { # DestinationConfigTemplate defines required destinations supported by the Connector. # Registration host destination config template. "defaultPort": 42, # The default port. @@ -907,6 +908,7 @@Method Details
"valueType": "A String", # Type of the parameter: string, int, bool etc. consider custom type for the benefit for the validation. }, "enrichmentSupported": True or False, # Enrichment Supported. + "eventListenerType": "A String", # The type of the event listener for a specific connector. "isEventingSupported": True or False, # Is Eventing Supported. "registrationDestinationConfig": { # DestinationConfigTemplate defines required destinations supported by the Connector. # Registration host destination config template. "defaultPort": 42, # The default port. diff --git a/docs/dyn/content_v2_1.reports.html b/docs/dyn/content_v2_1.reports.html index b1b6dde565e..1f5c2fb248c 100644 --- a/docs/dyn/content_v2_1.reports.html +++ b/docs/dyn/content_v2_1.reports.html @@ -186,10 +186,10 @@Method Details
"priceInsights": { # Price insights fields requested by the merchant in the query. Field values are only set if the merchant queries `PriceInsightsProductView`. https://support.google.com/merchants/answer/11916926 # Price insights fields requested by the merchant in the query. Field values are only set if the merchant queries `PriceInsightsProductView`. "predictedClicksChangeFraction": 3.14, # The predicted change in clicks as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in clicks. "predictedConversionsChangeFraction": 3.14, # The predicted change in conversions as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in conversions). - "predictedGrossProfitChangeFraction": 3.14, # The predicted change in gross profit as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in gross profit. + "predictedGrossProfitChangeFraction": 3.14, # *Deprecated*: This field is no longer supported and will start returning 0. The predicted change in gross profit as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in gross profit. "predictedImpressionsChangeFraction": 3.14, # The predicted change in impressions as a fraction after introducing the suggested price compared to current active price. For example, 0.05 is a 5% predicted increase in impressions. - "predictedMonthlyGrossProfitChangeCurrencyCode": "A String", # The predicted monthly gross profit change currency (ISO 4217 code). - "predictedMonthlyGrossProfitChangeMicros": "A String", # The predicted change in gross profit in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) after introducing the suggested price for a month compared to current active price. + "predictedMonthlyGrossProfitChangeCurrencyCode": "A String", # *Deprecated*: This field is no longer supported and will start returning USD for all requests. The predicted monthly gross profit change currency (ISO 4217 code). + "predictedMonthlyGrossProfitChangeMicros": "A String", # *Deprecated*: This field is no longer supported and will start returning 0. The predicted change in gross profit in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) after introducing the suggested price for a month compared to current active price. "suggestedPriceCurrencyCode": "A String", # The suggested price currency (ISO 4217 code). "suggestedPriceMicros": "A String", # The latest suggested price in micros (1 millionth of a standard unit, 1 USD = 1000000 micros) for the product. }, diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html index a6b67718200..4b32309260c 100644 --- a/docs/dyn/dataflow_v1b3.projects.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.jobs.html @@ -450,6 +450,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -803,6 +804,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1140,6 +1142,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1484,6 +1487,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1886,6 +1890,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2280,6 +2285,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2611,6 +2617,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html index ff48348ee93..7f83d220197 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html @@ -518,6 +518,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html index 14a22ed084f..8bc7fdb2a57 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.jobs.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.jobs.html @@ -438,6 +438,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -774,6 +775,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1118,6 +1120,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -1628,6 +1631,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2024,6 +2028,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -2354,6 +2359,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.locations.templates.html b/docs/dyn/dataflow_v1b3.projects.locations.templates.html index e269bfeeb9d..93ebcf2de23 100644 --- a/docs/dyn/dataflow_v1b3.projects.locations.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.locations.templates.html @@ -435,6 +435,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -913,6 +914,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html index 5b87b4c0768..a72f84cdf09 100644 --- a/docs/dyn/dataflow_v1b3.projects.templates.html +++ b/docs/dyn/dataflow_v1b3.projects.templates.html @@ -434,6 +434,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. @@ -912,6 +913,7 @@Method Details
"maxNumWorkers": 42, # The maximum number of workers to cap autoscaling at. This field is currently only supported for Streaming Engine jobs. "minNumWorkers": 42, # The minimum number of workers to scale down to. This field is currently only supported for Streaming Engine jobs. }, + "satisfiesPzi": True or False, # Output only. Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "satisfiesPzs": True or False, # Reserved for future use. This field is set only in responses from the server; it is ignored if it is set in any requests. "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it. { # A message describing the state of a particular execution stage. diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.html index 87013fad0f2..21665fad897 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.html @@ -212,7 +212,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -531,7 +531,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -834,7 +834,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -1098,7 +1098,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -1379,7 +1379,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html index c13e15933e2..42afdd80614 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html @@ -191,7 +191,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" @@ -423,7 +423,7 @@Method Details
"samplingPercent": 3.14, # Optional. The percentage of the records to be selected from the dataset for DataScan. Value can range between 0.0 and 100.0 with up to 3 significant decimal digits. Sampling is not applied if sampling_percent is not specified, 0 or 100. }, "dataQualityResult": { # The output of a DataQualityScan. # Output only. The result of the data quality scan. - "dimensions": [ # A list of results at the dimension level. + "dimensions": [ # A list of results at the dimension level.A dimension will have a corresponding DataQualityDimensionResult if and only if there is at least one rule with the 'dimension' field set to it. { # DataQualityDimensionResult provides a more detailed, per-dimension view of the results. "dimension": { # A dimension captures data quality intent about a defined subset of the rules specified. # Output only. The dimension config specified in the DataQualitySpec, as is. "name": "A String", # The dimension name a rule belongs to. Supported dimensions are "COMPLETENESS", "ACCURACY", "CONSISTENCY", "VALIDITY", "UNIQUENESS", "INTEGRITY" diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html index 5b6849248c0..89ec5d48ffb 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.html @@ -167,10 +167,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -356,10 +360,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -503,10 +511,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about batch execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.html b/docs/dyn/dataproc_v1.projects.locations.sessions.html index e210b0d9d25..a6a74d771d6 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.html @@ -155,10 +155,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -312,10 +316,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) @@ -409,10 +417,14 @@Method Details
}, "runtimeInfo": { # Runtime information about workload execution. # Output only. Runtime information about session execution. "approximateUsage": { # Usage metrics represent approximate total resources consumed by a workload. # Output only. Approximate workload resource usage, calculated when the workload completes (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)).Note: This metric calculation may change in the future, for example, to capture cumulative workload resource consumption during workload execution (see the Dataproc Serverless release notes (https://cloud.google.com/dataproc-serverless/docs/release-notes) for announcements, changes, fixes and other Dataproc developments). + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAcceleratorSeconds": "A String", # Optional. Accelerator usage in (milliAccelerator x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuSeconds": "A String", # Optional. DCU (Dataproc Compute Units) usage in (milliDCU x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGbSeconds": "A String", # Optional. Shuffle storage usage in (GB x seconds) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). }, "currentUsage": { # The usage snapshot represents the resources consumed by a workload at a specified time. # Output only. Snapshot of current workload resource usage. + "acceleratorType": "A String", # Optional. Accelerator type being used, if any + "milliAccelerator": "A String", # Optional. Milli (one-thousandth) accelerator. (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) "milliDcu": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "milliDcuPremium": "A String", # Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at premium tier (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)). "shuffleStorageGb": "A String", # Optional. Shuffle Storage in gigabytes (GB). (see Dataproc Serverless pricing (https://cloud.google.com/dataproc-serverless/pricing)) diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html index fad3a29cc5b..90cc8926789 100644 --- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html +++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html @@ -128,6 +128,9 @@Method Details
diff --git a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
index fad3a29cc5b..90cc8926789 100644
--- a/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
+++ b/docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html
@@ -128,6 +128,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -802,6 +805,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -1503,6 +1509,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -2268,6 +2277,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -2983,6 +2995,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -3646,6 +3661,9 @@ Method Details
       "version": 42, # Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
     },
   ],
+  "unreachable": [ # Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.
+    "A String",
+  ],
 }
Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -4437,6 +4458,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
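The new encryptionConfig field is writable on the template resource. A hedged sketch of supplying a CMEK key at template creation time; the template body is trimmed to the fields relevant here, and all resource names (project, region, cluster, bucket, key ring, key) are placeholders:

    from googleapiclient.discovery import build

    dataproc = build("dataproc", "v1")

    template = {
        "id": "example-template",
        "placement": {"managedCluster": {"clusterName": "example-cluster", "config": {}}},
        "jobs": [{"stepId": "job-1", "pysparkJob": {"mainPythonFileUri": "gs://example-bucket/job.py"}}],
        # New in this revision: Cloud KMS key for encrypting customer core content.
        "encryptionConfig": {
            "kmsKey": "projects/example-project/locations/us-central1/keyRings/example-ring/cryptoKeys/example-key"
        },
    }

    created = dataproc.projects().locations().workflowTemplates().create(
        parent="projects/example-project/locations/us-central1", body=template
    ).execute()
    print(created["name"])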
diff --git a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html
index 59bf07ace4f..84b72fa0a21 100644
--- a/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html
+++ b/docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html
@@ -128,6 +128,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -802,6 +805,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -1503,6 +1509,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -2268,6 +2277,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -2983,6 +2995,9 @@ Method Details
 { # A Dataproc workflow template resource.
   "createTime": "A String", # Output only. The time template was created.
   "dagTimeout": "A String", # Optional. Timeout duration for the DAG of jobs, expressed in seconds (see JSON representation of duration (https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a managed cluster, the cluster is deleted.
+  "encryptionConfig": { # Encryption settings for the encrypting customer core content. NEXT ID: 2 # Optional. Encryption settings for the encrypting customer core content.
+    "kmsKey": "A String", # Optional. The Cloud KMS key name to use for encrypting customer core content.
+  },
   "id": "A String",
   "jobs": [ # Required. The Directed Acyclic Graph of Jobs to submit.
     { # A job executed by the workflow.
@@ -3646,6 +3661,9 @@ Method Details
       "version": 42, # Optional. Used to perform a consistent read-modify-write. This field should be left blank for a CreateWorkflowTemplate request. It is required for an UpdateWorkflowTemplate request, and must match the current server version. A typical update template flow would fetch the current template with a GetWorkflowTemplate request, which will return the current template with the version field filled in with the current server version. The user updates other fields in the template, then returns it as part of the UpdateWorkflowTemplate request.
     },
   ],
+  "unreachable": [ # Output only. List of workflow templates that could not be included in the response. Attempting to get one of these resources may indicate why it was not included in the list response.
+    "A String",
+  ],
 }
  close()
Close httplib2 connections.
  create(parent, body=None, x__xgafv=None)
-Creates a config for Discovery to scan and profile storage.
+Creates a config for discovery to scan and profile storage.
  delete(name, x__xgafv=None)
-Deletes a Discovery configuration.
+Deletes a discovery configuration.
  get(name, x__xgafv=None)
-Gets a Discovery configuration.
+Gets a discovery configuration.
  list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists Discovery configurations.
+Lists discovery configurations.
  list_next()
Retrieves the next page of results.
  patch(name, body=None, x__xgafv=None)
-Updates a Discovery configuration.
+Updates a discovery configuration.
@@ -103,7 +103,7 @@ create(parent, body=None, x__xgafv=None)
-  Creates a config for Discovery to scan and profile storage.
+Creates a config for discovery to scan and profile storage.

Args:
  parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
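A hedged sketch of the renamed create flow, assuming the projects.locations.discoveryConfigs collection documented here; the project, location, and template name are placeholders, and the status value is an assumption about the enum:

    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")

    body = {
        # configId may be left empty to let the system generate one.
        "configId": "example-config",
        "discoveryConfig": {
            "status": "RUNNING",  # assumed enum value; see the DiscoveryConfig reference
            "inspectTemplates": [
                "projects/example-project/locations/global/inspectTemplates/example-template"
            ],
            # Profile everything: a single catch-all BigQuery target.
            "targets": [{"bigQueryTarget": {"filter": {"otherTables": {}}}}],
        },
    }

    config = dlp.projects().locations().discoveryConfigs().create(
        parent="projects/example-project/locations/europe-west3", body=body
    ).execute()
    print(config["name"])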
@@ -111,8 +111,8 @@ Method Details
    The object takes the form of:

{ # Request message for CreateDiscoveryConfig.
-  "configId": "A String", # The config id can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
-  "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create.
+  "configId": "A String", # The config ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
+  "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create.
     "actions": [ # Actions to execute at the completion of scanning.
       { # A task to execute when a data profile has been generated.
         "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -158,13 +158,13 @@ Method Details
         ],
       },
     ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
       "A String",
     ],
     "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
     "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
     "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
         "folderId": "A String", # The ID of the Folder within an organization to scan.
         "organizationId": "A String", # The ID of an organization to scan.
       },
@@ -173,7 +173,7 @@ Method Details
     "status": "A String", # Required. A status for this configuration.
     "targets": [ # Target to match against for determining what to scan and how frequently.
       { # Target used to match against for Discovery.
-        "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+        "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
           "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
             "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
               "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -188,22 +188,22 @@ Method Details
             ],
           },
         },
-        "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+        "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
          "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
          "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
            "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
            "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
          },
-        "typeCollection": "A String", # Restrict Discovery to categories of table types.
-        "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-          "types": [ # A set of bigquery table types.
+        "typeCollection": "A String", # Restrict discovery to categories of table types.
+        "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+          "types": [ # A set of BigQuery table types.
            "A String",
          ],
        },
        },
-        "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+        "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
        },
-        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
          "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
          },
          "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -233,7 +233,7 @@ Method Details
Returns:
  An object of the form:

-    { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+    { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
     "actions": [ # Actions to execute at the completion of scanning.
       { # A task to execute when a data profile has been generated.
         "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -279,13 +279,13 @@ Method Details
         ],
       },
     ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
       "A String",
     ],
     "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
     "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
     "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
         "folderId": "A String", # The ID of the Folder within an organization to scan.
         "organizationId": "A String", # The ID of an organization to scan.
       },
@@ -294,7 +294,7 @@ Method Details
     "status": "A String", # Required. A status for this configuration.
     "targets": [ # Target to match against for determining what to scan and how frequently.
       { # Target used to match against for Discovery.
-        "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+        "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
           "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
             "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
               "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -309,22 +309,22 @@ Method Details
             ],
           },
         },
-        "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+        "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
          "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
          "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
            "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
            "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
          },
-        "typeCollection": "A String", # Restrict Discovery to categories of table types.
-        "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-          "types": [ # A set of bigquery table types.
+        "typeCollection": "A String", # Restrict discovery to categories of table types.
+        "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+          "types": [ # A set of BigQuery table types.
            "A String",
          ],
        },
        },
-        "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+        "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
        },
-        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
          "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
          },
          "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -348,7 +348,7 @@ Method Details
diff --git a/docs/dyn/dlp_v2.projects.dlpJobs.html b/docs/dyn/dlp_v2.projects.dlpJobs.html
index 5c2d308515c..3a5b083a256 100644
--- a/docs/dyn/dlp_v2.projects.dlpJobs.html
+++ b/docs/dyn/dlp_v2.projects.dlpJobs.html
@@ -8042,7 +8042,7 @@ delete(name, x__xgafv=None)
-Deletes a Discovery configuration.
+Deletes a discovery configuration.

Args:
  name: string, Required. Resource name of the project and the config, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
@@ -366,7 +366,7 @@ Method Details
diff --git a/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html b/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html
index 74e7856ea9c..6fed1d1ed58 100644
--- a/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html
+++ b/docs/dyn/dlp_v2.organizations.locations.storedInfoTypes.html
@@ -466,9 +466,9 @@ get(name, x__xgafv=None)
-Gets a Discovery configuration.
+Gets a discovery configuration.

Args:
  name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
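A sketch of fetching a single discovery configuration, assuming the projects.locations.discoveryConfigs collection; the resource name reuses the placeholder ID from the Args example above:

    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    config = dlp.projects().locations().discoveryConfigs().get(
        name="projects/dlp-test-project/locations/global/discoveryConfigs/53234423"
    ).execute()
    # status is required on the config; lastRunTime is output only.
    print(config.get("status"), config.get("lastRunTime"))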
@@ -378,7 +378,7 @@ Method Details
Returns:
  An object of the form:

-    { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+    { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
     "actions": [ # Actions to execute at the completion of scanning.
       { # A task to execute when a data profile has been generated.
         "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -424,13 +424,13 @@ Method Details
         ],
       },
     ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
       "A String",
     ],
     "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
     "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
     "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
         "folderId": "A String", # The ID of the Folder within an organization to scan.
         "organizationId": "A String", # The ID of an organization to scan.
       },
@@ -439,7 +439,7 @@ Method Details
     "status": "A String", # Required. A status for this configuration.
     "targets": [ # Target to match against for determining what to scan and how frequently.
       { # Target used to match against for Discovery.
-        "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+        "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
           "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
             "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
               "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -454,22 +454,22 @@ Method Details
             ],
           },
         },
-        "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+        "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
          "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
          "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
            "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
            "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
          },
-        "typeCollection": "A String", # Restrict Discovery to categories of table types.
-        "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-          "types": [ # A set of bigquery table types.
+        "typeCollection": "A String", # Restrict discovery to categories of table types.
+        "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+          "types": [ # A set of BigQuery table types.
            "A String",
          ],
        },
        },
-        "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+        "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
        },
-        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
          "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
          },
          "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -493,13 +493,13 @@ Method Details
@@ -642,7 +642,7 @@ list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists Discovery configurations.
+Lists discovery configurations.

Args:
  parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
-  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
-  pageSize: integer, Size of the page, can be limited by a server.
-  pageToken: string, Page token to continue retrieval. Comes from previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
+  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
+  pageSize: integer, Size of the page. This value can be limited by a server.
+  pageToken: string, Page token to continue retrieval. Comes from the previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
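A sketch of the documented orderBy and pageToken semantics: page through discovery configs ordered by most recent run. The parent is a placeholder, and the collection choice (projects.locations.discoveryConfigs) is assumed as above:

    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    configs = dlp.projects().locations().discoveryConfigs()
    request = configs.list(
        parent="projects/example-project/locations/europe-west3",
        orderBy="last_run_time desc",  # one of the documented supported fields
        pageSize=50,
    )
    while request is not None:
        response = request.execute()
        for cfg in response.get("discoveryConfigs", []):
            print(cfg["name"], cfg.get("status"))
        # Per the docs, order_by must not change across subsequent paged calls.
        request = configs.list_next(previous_request=request, previous_response=response)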
@@ -510,7 +510,7 @@ Method Details
{ # Response message for ListDiscoveryConfigs.
  "discoveryConfigs": [ # List of configs, up to page_size in ListDiscoveryConfigsRequest.
-    { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+    { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
     "actions": [ # Actions to execute at the completion of scanning.
       { # A task to execute when a data profile has been generated.
         "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -556,13 +556,13 @@ Method Details
         ],
       },
     ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
       "A String",
     ],
     "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
     "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
     "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
         "folderId": "A String", # The ID of the Folder within an organization to scan.
         "organizationId": "A String", # The ID of an organization to scan.
       },
@@ -571,7 +571,7 @@ Method Details
     "status": "A String", # Required. A status for this configuration.
     "targets": [ # Target to match against for determining what to scan and how frequently.
       { # Target used to match against for Discovery.
-        "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+        "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
           "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
             "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
               "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -586,22 +586,22 @@ Method Details
             ],
           },
         },
-        "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+        "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
          "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
          "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
            "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
            "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
          },
-        "typeCollection": "A String", # Restrict Discovery to categories of table types.
-        "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-          "types": [ # A set of bigquery table types.
+        "typeCollection": "A String", # Restrict discovery to categories of table types.
+        "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+          "types": [ # A set of BigQuery table types.
            "A String",
          ],
        },
        },
-        "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+        "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
        },
-        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+        "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
          "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
          },
          "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -622,7 +622,7 @@Method Details
"updateTime": "A String", # Output only. The last update timestamp of a DiscoveryConfig. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDiscoveryConfigs request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDiscoveryConfigs request. }Method Details
diff --git a/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html b/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html index 8214312a598..7645673ecb6 100644 --- a/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html +++ b/docs/dyn/dlp_v2.organizations.locations.jobTriggers.html @@ -1184,9 +1184,9 @@patch(name, body=None, x__xgafv=None)
-Updates a Discovery configuration. +Updates a discovery configuration. Args: name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required) @@ -650,7 +650,7 @@Method Details
The object takes the form of: { # Request message for UpdateDiscoveryConfig. - "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # New DiscoveryConfig value. + "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. New DiscoveryConfig value. "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -696,13 +696,13 @@Method Details
], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -711,7 +711,7 @@Method Details
"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -726,22 +726,22 @@Method Details
], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
@@ -772,7 +772,7 @@Method Details
Returns: An object of the form: - { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). + { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -818,13 +818,13 @@Method Details
], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -833,7 +833,7 @@Method Details
"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -848,22 +848,22 @@Method Details
], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
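A minimal sketch of the patch call documented above, assuming an existing configuration; the resource name is the docs' own example, and the status value is an assumed enum literal. Whether unset fields are preserved is a partial-update detail worth verifying against the API reference:

    # Pause an existing discovery configuration (status value assumed).
    updated = dlp.projects().locations().discoveryConfigs().patch(
        name='projects/dlp-test-project/locations/global/discoveryConfigs/53234423',
        body={'discoveryConfig': {'status': 'PAUSED'}},
    ).execute()
    print(updated.get('updateTime'))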
diff --git a/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html b/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html index 727d77ccac3..4ddb7a33d47 100644 --- a/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html +++ b/docs/dyn/dlp_v2.organizations.locations.dlpJobs.html @@ -97,7 +97,7 @@Method Details
parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - `trigger_name` - The name of the trigger that created the job. - `end_time` - Corresponds to the time the job finished. - `start_time` - Corresponds to the time the job started. * Supported fields for risk analysis jobs: - `state` - RUNNING|CANCELED|FINISHED|FAILED - `end_time` - Corresponds to the time the job finished. - `start_time` - Corresponds to the time the job started. * The operator must be `=` or `!=`. Examples: * inspected_storage = cloud_storage AND state = done * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = done OR state = canceled) * end_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` pageSize: integer, The standard list page size. pageToken: string, The standard list page token. type: string, The type of job. Defaults to `DlpJobType.INSPECT` diff --git a/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html index 9f361ad4927..82d04a60458 100644 --- a/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html +++ b/docs/dyn/dlp_v2.organizations.locations.inspectTemplates.html @@ -721,9 +721,9 @@Method Details
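The `dlpJobs.list` parameters above combine filtering and ordering; a sketch using one of the docs' own filter examples, with an illustrative parent and an assumed `type` enum literal:

    # List finished or canceled Cloud Storage inspect jobs, newest first.
    jobs = dlp.projects().locations().dlpJobs().list(
        parent='projects/example-project/locations/europe-west3',
        filter='inspected_storage = cloud_storage AND (state = done OR state = canceled)',
        orderBy='end_time desc',
        type='INSPECT_JOB',  # assumed value; defaults to DlpJobType.INSPECT
    ).execute()
    for job in jobs.get('jobs', []):
        print(job['name'], job.get('state'))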
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -920,7 +920,7 @@Method Details
"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request. }Method Details
parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect triggers: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. - pageSize: integer, Size of the page, can be limited by a server. - pageToken: string, Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. + orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status. + pageSize: integer, Size of the page. This value can be limited by a server. + pageToken: string, Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls. type: string, The type of jobs. Will use `DlpJobType.INSPECT` if not set. 
Allowed values DLP_JOB_TYPE_UNSPECIFIED - Defaults to INSPECT_JOB. @@ -1542,7 +1542,7 @@Method Details
"updateTime": "A String", # Output only. The last update timestamp of a triggeredJob. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListJobTriggers request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListJobTriggers request. }Method Details
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@Method Details
An object of the form: { # Response message for ListStoredInfoTypes. - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request. "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest. { # StoredInfoType resource message that contains information about the current version and any pending updates. "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type. diff --git a/docs/dyn/dlp_v2.organizations.storedInfoTypes.html b/docs/dyn/dlp_v2.organizations.storedInfoTypes.html index 68de211ec57..812c15bb642 100644 --- a/docs/dyn/dlp_v2.organizations.storedInfoTypes.html +++ b/docs/dyn/dlp_v2.organizations.storedInfoTypes.html @@ -466,9 +466,9 @@Method Details
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@Method Details
An object of the form: { # Response message for ListStoredInfoTypes. - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request. "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest. { # StoredInfoType resource message that contains information about the current version and any pending updates. "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type. diff --git a/docs/dyn/dlp_v2.projects.deidentifyTemplates.html b/docs/dyn/dlp_v2.projects.deidentifyTemplates.html index 9bdd468b23e..531c5db5d0e 100644 --- a/docs/dyn/dlp_v2.projects.deidentifyTemplates.html +++ b/docs/dyn/dlp_v2.projects.deidentifyTemplates.html @@ -2752,9 +2752,9 @@Method Details
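Draining every page of `storedInfoTypes.list` via `list_next`, following the pagination fields described above; the organization ID is illustrative and `orderBy` uses a supported field:

    all_types = []
    request = dlp.organizations().storedInfoTypes().list(
        parent='organizations/123456789',  # illustrative organization ID
        orderBy='display_name',
        pageSize=100,
    )
    while request is not None:
        response = request.execute()
        all_types.extend(response.get('storedInfoTypes', []))
        request = dlp.organizations().storedInfoTypes().list_next(request, response)
    print(len(all_types), 'stored info types')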
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListDeidentifyTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListDeidentifyTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -3628,7 +3628,7 @@Method Details
"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDeidentifyTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDeidentifyTemplates request. }Method Details
parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - `trigger_name` - The name of the trigger that created the job. - `end_time` - Corresponds to the time the job finished. - `start_time` - Corresponds to the time the job started. * Supported fields for risk analysis jobs: - `state` - RUNNING|CANCELED|FINISHED|FAILED - `end_time` - Corresponds to the time the job finished. - `start_time` - Corresponds to the time the job started. * The operator must be `=` or `!=`. Examples: * inspected_storage = cloud_storage AND state = done * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = done OR state = canceled) * end_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters. locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state` pageSize: integer, The standard list page size. pageToken: string, The standard list page token. type: string, The type of job. Defaults to `DlpJobType.INSPECT` diff --git a/docs/dyn/dlp_v2.projects.inspectTemplates.html index 89e09db8d40..ce891429f8b 100644 --- a/docs/dyn/dlp_v2.projects.inspectTemplates.html +++ b/docs/dyn/dlp_v2.projects.inspectTemplates.html @@ -721,9 +721,9 @@Method Details
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -920,7 +920,7 @@Method Details
"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate. }, ], - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request. }
Close httplib2 connections.
create(parent, body=None, x__xgafv=None)
-Creates a config for Discovery to scan and profile storage.
+Creates a config for discovery to scan and profile storage.
-Deletes a Discovery configuration.
+Deletes a discovery configuration.
-Gets a Discovery configuration.
+Gets a discovery configuration.
list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists Discovery configurations.
+Lists discovery configurations.
Retrieves the next page of results.
patch(name, body=None, x__xgafv=None)
-Updates a Discovery configuration.
+Updates a discovery configuration.
close()
@@ -103,7 +103,7 @@ create(parent, body=None, x__xgafv=None)
- Creates a config for Discovery to scan and profile storage. +Creates a config for discovery to scan and profile storage. Args: parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) @@ -111,8 +111,8 @@Method Details
The object takes the form of: { # Request message for CreateDiscoveryConfig. - "configId": "A String", # The config id can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one. - "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create. + "configId": "A String", # The config ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one. + "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. The DiscoveryConfig to create. "actions": [ # Actions to execute at the completion of scanning. { # A task to execute when a data profile has been generated. "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location. @@ -158,13 +158,13 @@Method Details
], }, ], - "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency. + "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency. "A String", ], "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed. "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`. "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org. - "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project + "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project "folderId": "A String", # The ID of the Folder within an organization to scan. "organizationId": "A String", # The ID of an organization to scan. }, @@ -173,7 +173,7 @@Method Details
"status": "A String", # Required. A status for this configuration. "targets": [ # Target to match against for determining what to scan and how frequently. { # Target used to match against for Discovery. - "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. + "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied. "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity. "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified. "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly. @@ -188,22 +188,22 @@Method Details
], }, }, - "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated. + "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated. "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling. "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned. "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater. "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table }, - "typeCollection": "A String", # Restrict Discovery to categories of table types. - "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types. - "types": [ # A set of bigquery table types. + "typeCollection": "A String", # Restrict discovery to categories of table types. + "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types. + "types": [ # A set of BigQuery table types. "A String", ], }, }, - "disabled": { # Do nothing. # Tables that match this filter will not have profiles created. + "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created. }, - "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. + "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table. "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically. }, "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID. 
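The comments above spell out the matching rules: the first target whose filter matches a table is applied, and the `otherTables` catch-all should come last. A hedged sketch of a `targets` list that follows those rules, using only the fields shown above (all values illustrative):

    targets = [
        {   # Profile matching tables once they satisfy the OR conditions.
            'bigQueryTarget': {
                'filter': {'tables': {}},  # a specific table collection goes here
                'conditions': {
                    'createdAfter': '2023-01-01T00:00:00Z',  # avoid backfilling
                    'orConditions': {'minAge': '86400s', 'minRowCount': 1000},
                },
            },
        },
        {   # Catch-all last: everything else is matched but never profiled.
            'bigQueryTarget': {
                'filter': {'otherTables': {}},
                'disabled': {},
            },
        },
    ]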
@@ -233,7 +233,7 @@ Method Details
Returns:
  An object of the form:

-    { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+    { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
      "actions": [ # Actions to execute at the completion of scanning.
        { # A task to execute when a data profile has been generated.
          "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -279,13 +279,13 @@ Method Details
        ],
      },
    ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
      "A String",
    ],
    "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
    "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
    "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
        "folderId": "A String", # The ID of the Folder within an organization to scan.
        "organizationId": "A String", # The ID of an organization to scan.
      },
@@ -294,7 +294,7 @@ Method Details
      "status": "A String", # Required. A status for this configuration.
      "targets": [ # Target to match against for determining what to scan and how frequently.
        { # Target used to match against for Discovery.
-          "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+          "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
            "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
              "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
                "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -309,22 +309,22 @@ Method Details
              ],
            },
          },
-          "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+          "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
            "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
            "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
              "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
              "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
            },
-            "typeCollection": "A String", # Restrict Discovery to categories of table types.
-            "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-              "types": [ # A set of bigquery table types.
+            "typeCollection": "A String", # Restrict discovery to categories of table types.
+            "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+              "types": [ # A set of BigQuery table types.
                "A String",
              ],
            },
          },
-          "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+          "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
          },
-          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
            "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
            },
            "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -348,7 +348,7 @@ Method Details
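If this Returns block belongs to DiscoveryConfig creation, a call might look like the sketch below. The create method name, the parent format, and the "discoveryConfig" request wrapper are assumptions inferred from the UpdateDiscoveryConfig message shown later in this patch, not confirmed by the hunks above.

    # Hedged sketch: create a discovery configuration with the generated
    # Python client, using application default credentials.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    resp = dlp.projects().locations().discoveryConfigs().create(
        parent="projects/dlp-test-project/locations/global",  # placeholder
        body={  # "discoveryConfig" wrapper assumed from the update schema
            "discoveryConfig": {
                "targets": [{"bigQueryTarget": {"filter": {"otherTables": {}}}}],
            },
        },
    ).execute()
    print(resp["name"])  # service-assigned resource name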
delete(name, x__xgafv=None)
-Deletes a Discovery configuration.
+Deletes a discovery configuration.

Args:
  name: string, Required. Resource name of the project and the config, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
@@ -366,7 +366,7 @@
@@ -867,7 +867,7 @@ Method Details
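As a hedged illustration of the delete call documented above: the resource name is the doc's own example, while the resource path through projects().locations() is an assumption based on the file layout of this patch.

    # Sketch: delete a discovery configuration. Errors are not handled here.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    dlp.projects().locations().discoveryConfigs().delete(
        name="projects/dlp-test-project/discoveryConfigs/53234423"
    ).execute()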
diff --git a/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html b/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html
index c16418792f2..2de077729da 100644
--- a/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html
+++ b/docs/dyn/dlp_v2.projects.locations.storedInfoTypes.html
@@ -466,9 +466,9 @@ get(name, x__xgafv=None)
-Gets a Discovery configuration.
+Gets a discovery configuration.

Args:
  name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
@@ -378,7 +378,7 @@ Method Details
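A hedged sketch of the get call above. The name value is the doc's own example; the resource path is assumed from the generated library's usual layout.

    # Sketch: fetch a discovery configuration and inspect a few fields.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    config = dlp.projects().locations().discoveryConfigs().get(
        name="projects/dlp-test-project/discoveryConfigs/53234423"
    ).execute()
    print(config.get("status"), config.get("lastRunTime"))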
Returns:
  An object of the form:

-    { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+    { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
      "actions": [ # Actions to execute at the completion of scanning.
        { # A task to execute when a data profile has been generated.
          "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -424,13 +424,13 @@ Method Details
        ],
      },
    ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
      "A String",
    ],
    "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
    "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
    "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
        "folderId": "A String", # The ID of the Folder within an organization to scan.
        "organizationId": "A String", # The ID of an organization to scan.
      },
@@ -439,7 +439,7 @@ Method Details
      "status": "A String", # Required. A status for this configuration.
      "targets": [ # Target to match against for determining what to scan and how frequently.
        { # Target used to match against for Discovery.
-          "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+          "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
            "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
              "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
                "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -454,22 +454,22 @@ Method Details
              ],
            },
          },
-          "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+          "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
            "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
            "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
              "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
              "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
            },
-            "typeCollection": "A String", # Restrict Discovery to categories of table types.
-            "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-              "types": [ # A set of bigquery table types.
+            "typeCollection": "A String", # Restrict discovery to categories of table types.
+            "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+              "types": [ # A set of BigQuery table types.
                "A String",
              ],
            },
          },
-          "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+          "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
          },
-          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
            "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
            },
            "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -493,13 +493,13 @@ Method Details
@@ -642,7 +642,7 @@ list(parent, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists Discovery configurations.
+Lists discovery configurations.

Args:
  parent: string, Required. Parent resource name. The format of this value is as follows: `projects/`PROJECT_ID`/locations/`LOCATION_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
-  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
-  pageSize: integer, Size of the page, can be limited by a server.
-  pageToken: string, Page token to continue retrieval. Comes from previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
+  orderBy: string, Comma separated list of config fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `last_run_time`: corresponds to the last time the DiscoveryConfig ran. - `name`: corresponds to the DiscoveryConfig's name. - `status`: corresponds to DiscoveryConfig's status.
+  pageSize: integer, Size of the page. This value can be limited by a server.
+  pageToken: string, Page token to continue retrieval. Comes from the previous call to ListDiscoveryConfigs. `order_by` field must not change for subsequent calls.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
@@ -510,7 +510,7 @@ Method Details
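A hedged sketch of the list call above, using the doc's own parent example and one of the documented sort fields. The resource path through projects().locations() is assumed from the file layout.

    # Sketch: list discovery configurations ordered by most recent run.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    resp = dlp.projects().locations().discoveryConfigs().list(
        parent="projects/example-project/locations/europe-west3",
        orderBy="last_run_time desc",  # supported sort field per the args above
        pageSize=50,
    ).execute()
    for config in resp.get("discoveryConfigs", []):
        print(config["name"])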
    { # Response message for ListDiscoveryConfigs.
      "discoveryConfigs": [ # List of configs, up to page_size in ListDiscoveryConfigsRequest.
-        { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+        { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
          "actions": [ # Actions to execute at the completion of scanning.
            { # A task to execute when a data profile has been generated.
              "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -556,13 +556,13 @@ Method Details
        ],
      },
    ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
      "A String",
    ],
    "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
    "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
    "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
        "folderId": "A String", # The ID of the Folder within an organization to scan.
        "organizationId": "A String", # The ID of an organization to scan.
      },
@@ -571,7 +571,7 @@ Method Details
      "status": "A String", # Required. A status for this configuration.
      "targets": [ # Target to match against for determining what to scan and how frequently.
        { # Target used to match against for Discovery.
-          "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+          "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
            "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
              "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
                "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -586,22 +586,22 @@ Method Details
              ],
            },
          },
-          "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+          "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
            "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
            "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
              "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
              "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
            },
-            "typeCollection": "A String", # Restrict Discovery to categories of table types.
-            "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-              "types": [ # A set of bigquery table types.
+            "typeCollection": "A String", # Restrict discovery to categories of table types.
+            "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+              "types": [ # A set of BigQuery table types.
                "A String",
              ],
            },
          },
-          "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+          "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
          },
-          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
            "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
            },
            "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -622,7 +622,7 @@ Method Details
      "updateTime": "A String", # Output only. The last update timestamp of a DiscoveryConfig.
    },
  ],
-  "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListDiscoveryConfigs request.
+  "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListDiscoveryConfigs request.
}
diff --git a/docs/dyn/dlp_v2.projects.locations.jobTriggers.html b/docs/dyn/dlp_v2.projects.locations.jobTriggers.html
index 2d186e86fd0..2f0c5b312c0 100644
--- a/docs/dyn/dlp_v2.projects.locations.jobTriggers.html
+++ b/docs/dyn/dlp_v2.projects.locations.jobTriggers.html
@@ -4991,9 +4991,9 @@ patch(name, body=None, x__xgafv=None)
-Updates a Discovery configuration.
+Updates a discovery configuration.

Args:
  name: string, Required. Resource name of the project and the configuration, for example `projects/dlp-test-project/discoveryConfigs/53234423`. (required)
@@ -650,7 +650,7 @@ Method Details
  The object takes the form of:

{ # Request message for UpdateDiscoveryConfig.
-  "discoveryConfig": { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # New DiscoveryConfig value.
+  "discoveryConfig": { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention). # Required. New DiscoveryConfig value.
    "actions": [ # Actions to execute at the completion of scanning.
      { # A task to execute when a data profile has been generated.
        "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -696,13 +696,13 @@ Method Details
        ],
      },
    ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
      "A String",
    ],
    "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
    "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
    "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
        "folderId": "A String", # The ID of the Folder within an organization to scan.
        "organizationId": "A String", # The ID of an organization to scan.
      },
@@ -711,7 +711,7 @@ Method Details
      "status": "A String", # Required. A status for this configuration.
      "targets": [ # Target to match against for determining what to scan and how frequently.
        { # Target used to match against for Discovery.
-          "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+          "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
            "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
              "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
                "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -726,22 +726,22 @@ Method Details
              ],
            },
          },
-          "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+          "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
            "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
            "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
              "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
              "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
            },
-            "typeCollection": "A String", # Restrict Discovery to categories of table types.
-            "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-              "types": [ # A set of bigquery table types.
+            "typeCollection": "A String", # Restrict discovery to categories of table types.
+            "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+              "types": [ # A set of BigQuery table types.
                "A String",
              ],
            },
          },
-          "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+          "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
          },
-          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
            "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
            },
            "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
@@ -772,7 +772,7 @@ Method Details
Returns:
  An object of the form:

-    { # Configuration for Discovery to scan resources for profile generation. Only one Discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
+    { # Configuration for discovery to scan resources for profile generation. Only one discovery configuration may exist per organization, folder, or project. The generated data profiles are retained according to the [data retention policy] (https://cloud.google.com/dlp/docs/data-profiles#retention).
      "actions": [ # Actions to execute at the completion of scanning.
        { # A task to execute when a data profile has been generated.
          "exportData": { # If set, the detailed data profiles will be persisted to the location of your choice whenever updated. # Export data profiles into a provided location.
@@ -818,13 +818,13 @@ Method Details
        ],
      },
    ],
-    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data_residency.
+    "inspectTemplates": [ # Detection logic for profile generation. Not all template features are used by Discovery. FindingLimits, include_quote and exclude_info_types have no impact on Discovery. Multiple templates may be provided if there is data in multiple regions. At most one template must be specified per-region (including "global"). Each region is scanned using the applicable template. If no region-specific template is specified, but a "global" template is specified, it will be copied to that region and used instead. If no global or region-specific template is provided for a region with data, that region's data will not be scanned. For more information, see https://cloud.google.com/dlp/docs/data-profiles#data-residency.
      "A String",
    ],
    "lastRunTime": "A String", # Output only. The timestamp of the last time this config was executed.
    "name": "A String", # Unique resource name for the DiscoveryConfig, assigned by the service when the DiscoveryConfig is created, for example `projects/dlp-test-project/locations/global/discoveryConfigs/53234423`.
    "orgConfig": { # Project and scan location information. Only set when the parent is an org. # Only set when the parent is an org.
-      "location": { # The location to begin a Discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
+      "location": { # The location to begin a discovery scan. Denotes an organization ID or folder ID within an organization. # The data to scan: folder, org, or project
        "folderId": "A String", # The ID of the Folder within an organization to scan.
        "organizationId": "A String", # The ID of an organization to scan.
      },
@@ -833,7 +833,7 @@ Method Details
      "status": "A String", # Required. A status for this configuration.
      "targets": [ # Target to match against for determining what to scan and how frequently.
        { # Target used to match against for Discovery.
-          "bigQueryTarget": { # Target used to match against for Discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
+          "bigQueryTarget": { # Target used to match against for discovery with BigQuery tables # BigQuery target for Discovery. The first target to match a table will be the one applied.
            "cadence": { # What must take place for a profile to be updated and how frequently it should occur. New tables are scanned as quickly as possible depending on system capacity. # How often and when to update profiles. New tables that match both the filter and conditions are scanned as quickly as possible depending on system capacity.
              "schemaModifiedCadence": { # The cadence at which to update data profiles when a schema is modified. # Governs when to update data profiles when a schema is modified.
                "frequency": "A String", # How frequently profiles may be updated when schemas are modified. Defaults to monthly.
@@ -848,22 +848,22 @@ Method Details
              ],
            },
          },
-          "conditions": { # Requirements that must be true before a table is scanned in Discovery for the first time. There is an AND relationship between the top-level attributes. # In addition to matching the filter, these conditions must be true before a profile is generated.
+          "conditions": { # Requirements that must be true before a table is scanned in discovery for the first time. There is an AND relationship between the top-level attributes. Additionally, minimum conditions with an OR relationship that must be met before Cloud DLP scans a table can be set (like a minimum row count or a minimum table age). # In addition to matching the filter, these conditions must be true before a profile is generated.
            "createdAfter": "A String", # BigQuery table must have been created after this date. Used to avoid backfilling.
            "orConditions": { # There is an OR relationship between these attributes. They are used to determine if a table should be scanned or not in Discovery. # At least one of the conditions must be true for a table to be scanned.
              "minAge": "A String", # Minimum age a table must have before Cloud DLP can profile it. Value must be 1 hour or greater.
              "minRowCount": 42, # Minimum number of rows that should be present before Cloud DLP profiles a table
            },
-            "typeCollection": "A String", # Restrict Discovery to categories of table types.
-            "types": { # The types of bigquery tables supported by Cloud DLP. # Restrict Discovery to specific table types.
-              "types": [ # A set of bigquery table types.
+            "typeCollection": "A String", # Restrict discovery to categories of table types.
+            "types": { # The types of BigQuery tables supported by Cloud DLP. # Restrict discovery to specific table types.
+              "types": [ # A set of BigQuery table types.
                "A String",
              ],
            },
          },
-          "disabled": { # Do nothing. # Tables that match this filter will not have profiles created.
+          "disabled": { # Do not profile the tables. # Tables that match this filter will not have profiles created.
          },
-          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. Also lets you set minimum conditions that must be met before Cloud DLP scans a table (like a minimum row count or a minimum table age). # Required. The tables the Discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
+          "filter": { # Determines what tables will have profiles generated within an organization or project. Includes the ability to filter by regular expression patterns on project ID, dataset ID, and table ID. # Required. The tables the discovery cadence applies to. The first target with a matching filter will be the one to apply to a table.
            "otherTables": { # Catch-all for all other tables not specified by other filters. Should always be last, except for single-table configurations, which will only have a TableReference target. # Catch-all. This should always be the last filter in the list because anything above it will apply first. Should only appear once in a configuration. If none is specified, a default one will be added automatically.
            },
            "tables": { # Specifies a collection of BigQuery tables. Used for Discovery. # A specific set of tables for this filter to apply to. A table collection must be specified in only one filter per config. If a table id or dataset is empty, Cloud DLP assumes all tables in that collection must be profiled. Must specify a project ID.
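A hedged sketch of the patch call documented above. Only the "discoveryConfig" request wrapper is confirmed by the request schema; the "PAUSED" status value and the resource path are assumptions.

    # Sketch: pause an existing discovery configuration via patch.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    updated = dlp.projects().locations().discoveryConfigs().patch(
        name="projects/dlp-test-project/discoveryConfigs/53234423",
        body={"discoveryConfig": {"status": "PAUSED"}},  # status value assumed
    ).execute()
    print(updated.get("updateTime"))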
diff --git a/docs/dyn/dlp_v2.projects.locations.dlpJobs.html b/docs/dyn/dlp_v2.projects.locations.dlpJobs.html
index 4c87170a147..f506b2253fe 100644
--- a/docs/dyn/dlp_v2.projects.locations.dlpJobs.html
+++ b/docs/dyn/dlp_v2.projects.locations.dlpJobs.html
@@ -8160,7 +8160,7 @@ Method Details
  parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
  filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - `trigger_name` - The name of the trigger that created the job. - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * Supported fields for risk analysis jobs: - `state` - RUNNING|CANCELED|FINISHED|FAILED - 'end_time` - Corresponds to the time the job finished. - 'start_time` - Corresponds to the time the job finished. * The operator must be `=` or `!=`. Examples: * inspected_storage = cloud_storage AND state = done * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = done OR state = canceled) * end_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters.
  locationId: string, Deprecated. This field has no effect.
-  orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`
+  orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, end_time asc, create_time desc` Supported fields are: - `create_time`: corresponds to the time the job was created. - `end_time`: corresponds to the time the job ended. - `name`: corresponds to the job's name. - `state`: corresponds to `state`
  pageSize: integer, The standard list page size.
  pageToken: string, The standard list page token.
  type: string, The type of job. Defaults to `DlpJobType.INSPECT`
diff --git a/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html b/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html
index 62319e3ac1a..a0c7a714da1 100644
--- a/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html
+++ b/docs/dyn/dlp_v2.projects.locations.inspectTemplates.html
@@ -721,9 +721,9 @@ Method Details
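A hedged sketch of the ListDlpJobs call whose args are documented above. The filter string and sort field are quoted from the doc's own examples; the resource path is assumed from the file layout.

    # Sketch: list Cloud Storage inspect jobs that are done, newest first.
    from googleapiclient.discovery import build

    dlp = build("dlp", "v2")
    resp = dlp.projects().locations().dlpJobs().list(
        parent="projects/example-project/locations/europe-west3",
        filter="inspected_storage = cloud_storage AND state = done",
        orderBy="end_time desc",
    ).execute()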
Args:
  parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID + Organizations scope, location specified: `organizations/`ORG_ID`/locations/`LOCATION_ID + Organizations scope, no location specified (defaults to global): `organizations/`ORG_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
  locationId: string, Deprecated. This field has no effect.
-  orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.
-  pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100.
-  pageToken: string, Page token to continue retrieval. Comes from previous call to `ListInspectTemplates`.
+  orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the template was created. - `update_time`: corresponds to the time the template was last updated. - `name`: corresponds to the template's name. - `display_name`: corresponds to the template's display name.
+  pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100.
+  pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListInspectTemplates`.
  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
@@ -920,7 +920,7 @@ Method Details
    "updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate.
  },
],
-  "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListInspectTemplates request.
+  "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListInspectTemplates request.
}
  parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
  filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect triggers: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters.
  locationId: string, Deprecated. This field has no effect.
-  orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.
-  pageSize: integer, Size of the page, can be limited by a server.
-  pageToken: string, Page token to continue retrieval. Comes from previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.
+  orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to the time the JobTrigger was created. - `update_time`: corresponds to the time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to the JobTrigger's name. - `display_name`: corresponds to the JobTrigger's display name. - `status`: corresponds to JobTrigger's status.
+  pageSize: integer, Size of the page. This value can be limited by a server.
+  pageToken: string, Page token to continue retrieval. Comes from the previous call to ListJobTriggers. `order_by` field must not change for subsequent calls.
  type: string, The type of jobs. Will use `DlpJobType.INSPECT` if not set.
    Allowed values
      DLP_JOB_TYPE_UNSPECIFIED - Defaults to INSPECT_JOB.
@@ -5349,7 +5349,7 @@ Method Details
    "updateTime": "A String", # Output only. The last update timestamp of a triggeredJob.
  },
],
-  "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListJobTriggers request.
+  "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListJobTriggers request.
}
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@Method Details
An object of the form: { # Response message for ListStoredInfoTypes. - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request. "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest. { # StoredInfoType resource message that contains information about the current version and any pending updates. "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type. diff --git a/docs/dyn/dlp_v2.projects.storedInfoTypes.html b/docs/dyn/dlp_v2.projects.storedInfoTypes.html index 82292875257..5917181d395 100644 --- a/docs/dyn/dlp_v2.projects.storedInfoTypes.html +++ b/docs/dyn/dlp_v2.projects.storedInfoTypes.html @@ -466,9 +466,9 @@Method Details
Args: parent: string, Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required) locationId: string, Deprecated. This field has no effect. - orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. - pageSize: integer, Size of the page, can be limited by the server. If zero server returns a page of max size 100. - pageToken: string, Page token to continue retrieval. Comes from previous call to `ListStoredInfoTypes`. + orderBy: string, Comma separated list of fields to order by, followed by `asc` or `desc` postfix. This list is case insensitive. The default sorting order is ascending. Redundant space characters are insignificant. Example: `name asc, display_name, create_time desc` Supported fields are: - `create_time`: corresponds to the time the most recent version of the resource was created. - `state`: corresponds to the state of the resource. - `name`: corresponds to resource name. - `display_name`: corresponds to info type's display name. + pageSize: integer, Size of the page. This value can be limited by the server. If zero, the server returns a page of max size 100. + pageToken: string, Page token to continue retrieval. Comes from the previous call to `ListStoredInfoTypes`. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -478,7 +478,7 @@Method Details
An object of the form: { # Response message for ListStoredInfoTypes. - "nextPageToken": "A String", # If the next page is available then the next page token to be used in following ListStoredInfoTypes request. + "nextPageToken": "A String", # If the next page is available then the next page token to be used in the following ListStoredInfoTypes request. "storedInfoTypes": [ # List of storedInfoTypes, up to page_size in ListStoredInfoTypesRequest. { # StoredInfoType resource message that contains information about the current version and any pending updates. "currentVersion": { # Version of a StoredInfoType, including the configuration used to build it, create timestamp, and current state. # Current version of the stored info type. diff --git a/docs/dyn/gmail_v1.users.settings.cse.keypairs.html b/docs/dyn/gmail_v1.users.settings.cse.keypairs.html index b985a208d61..696adfcb3cc 100644 --- a/docs/dyn/gmail_v1.users.settings.cse.keypairs.html +++ b/docs/dyn/gmail_v1.users.settings.cse.keypairs.html @@ -121,6 +121,9 @@Method Details
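Both the organization- and project-scoped `ListStoredInfoTypes` surfaces edited above paginate the same way. A minimal sketch against the project scope, with a hypothetical project ID: omitting `pageSize` (or passing zero) leaves the page size to the server, which the docs cap at 100.

```python
from googleapiclient.discovery import build

# Assumes Application Default Credentials are configured in this environment.
dlp = build("dlp", "v2")

request = dlp.projects().storedInfoTypes().list(
    parent="projects/example-project",  # hypothetical project
    orderBy="display_name, create_time desc",
)
while request is not None:
    response = request.execute()
    for info_type in response.get("storedInfoTypes", []):
        print(info_type["name"])
    # Re-issues the call with the returned nextPageToken, or returns None.
    request = dlp.projects().storedInfoTypes().list_next(request, response)
```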
"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -149,6 +152,9 @@Method Details
"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -191,6 +197,9 @@Method Details
"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -233,6 +242,9 @@Method Details
"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -269,6 +281,9 @@Method Details
"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. @@ -308,6 +323,9 @@Method Details
"pkcs7": "A String", # Input only. The public key and its certificate chain. The chain must be in [PKCS#7](https://en.wikipedia.org/wiki/PKCS_7) format and use PEM encoding and ASCII armor. "privateKeyMetadata": [ # Metadata for instances of this key pair's private key. { # Metadata for a private key instance. + "hardwareKeyMetadata": { # Metadata for hardware keys. # Metadata for hardware keys. + "description": "A String", # Description about the hardware key. + }, "kaclsKeyMetadata": { # Metadata for private keys managed by an external key access control list service. For details about managing key access, see [Google Workspace CSE API Reference](https://developers.google.com/workspace/cse/reference). # Metadata for a private key instance managed by an external key access control list service. "kaclsData": "A String", # Opaque data generated and used by the key access control list service. Maximum size: 8 KiB. "kaclsUri": "A String", # The URI of the key access control list service that manages the private key. diff --git a/docs/dyn/identitytoolkit_v1.accounts.html b/docs/dyn/identitytoolkit_v1.accounts.html index 81e0a0edd87..f5662219655 100644 --- a/docs/dyn/identitytoolkit_v1.accounts.html +++ b/docs/dyn/identitytoolkit_v1.accounts.html @@ -177,7 +177,7 @@Method Details
"providerId": "A String", # The provider ID from the request, if provided. "registered": True or False, # Whether the email identifier represents an existing account. Present only when an email identifier is set in the request. "sessionId": "A String", # The session ID from the request, or a random string generated by CreateAuthUri if absent. It is used to prevent session fixation attacks. - "signinMethods": [ # The list of sign-in methods that the user has previously used. Each element is one of `password`, `emailLink`, or the provider ID of an IdP. Present only when a registered email identifier is set in the request. + "signinMethods": [ # The list of sign-in methods that the user has previously used. Each element is one of `password`, `emailLink`, or the provider ID of an IdP. Present only when a registered email identifier is set in the request. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, this method returns an empty list. "A String", ], }Method Details
], "disableUser": True or False, # If true, marks the account as disabled, meaning the user will no longer be able to sign-in. "displayName": "A String", # The user's new display name to be updated in the account's attributes. The length of the display name must be less than or equal to 256 characters. - "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. + "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator. "emailVerified": True or False, # Whether the user's email has been verified. Specifying this field requires a Google OAuth 2.0 credential with proper [permissions] (https://cloud.google.com/identity-platform/docs/access-control). "idToken": "A String", # A valid Identity Platform ID token. Required when attempting to change user-related information. "instanceId": "A String", diff --git a/docs/dyn/identitytoolkit_v1.projects.accounts.html b/docs/dyn/identitytoolkit_v1.projects.accounts.html index 4b390a3434a..857a2840f74 100644 --- a/docs/dyn/identitytoolkit_v1.projects.accounts.html +++ b/docs/dyn/identitytoolkit_v1.projects.accounts.html @@ -644,7 +644,7 @@Method Details
], "disableUser": True or False, # If true, marks the account as disabled, meaning the user will no longer be able to sign-in. "displayName": "A String", # The user's new display name to be updated in the account's attributes. The length of the display name must be less than or equal to 256 characters. - "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. + "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator. "emailVerified": True or False, # Whether the user's email has been verified. Specifying this field requires a Google OAuth 2.0 credential with proper [permissions] (https://cloud.google.com/identity-platform/docs/access-control). "idToken": "A String", # A valid Identity Platform ID token. Required when attempting to change user-related information. "instanceId": "A String", diff --git a/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html b/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html index 2443137ed7b..d9cba83d2bf 100644 --- a/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html +++ b/docs/dyn/identitytoolkit_v1.projects.tenants.accounts.html @@ -651,7 +651,7 @@Method Details
], "disableUser": True or False, # If true, marks the account as disabled, meaning the user will no longer be able to sign-in. "displayName": "A String", # The user's new display name to be updated in the account's attributes. The length of the display name must be less than or equal to 256 characters. - "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. + "email": "A String", # The user's new email to be updated in the account's attributes. The length of email should be less than 256 characters and in the format of `name@domain.tld`. The email should also match the [RFC 822](https://tools.ietf.org/html/rfc822) addr-spec production. If [email enumeration protection](https://cloud.google.com/identity-platform/docs/admin/email-enumeration-protection) is enabled, the email cannot be changed by the user without verifying the email first, but it can be changed by an administrator. "emailVerified": True or False, # Whether the user's email has been verified. Specifying this field requires a Google OAuth 2.0 credential with proper [permissions] (https://cloud.google.com/identity-platform/docs/access-control). "idToken": "A String", # A valid Identity Platform ID token. Required when attempting to change user-related information. "instanceId": "A String", diff --git a/docs/dyn/identitytoolkit_v2.projects.html b/docs/dyn/identitytoolkit_v2.projects.html index 64f4c72c396..6b4750ea3df 100644 --- a/docs/dyn/identitytoolkit_v2.projects.html +++ b/docs/dyn/identitytoolkit_v2.projects.html @@ -105,15 +105,9 @@Instance Methods
getConfig(name, x__xgafv=None)
Retrieve an Identity Toolkit project configuration.
--
-getPasskeyConfig(name, x__xgafv=None)
Retrieve a passkey configuration for an Identity Toolkit project.
updateConfig(name, body=None, updateMask=None, x__xgafv=None)
Update an Identity Toolkit project configuration.
--
-updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
Update a passkey configuration for an Identity Toolkit project.
Method Details
-close()
@@ -340,29 +334,6 @@Method Details
}--getPasskeyConfig(name, x__xgafv=None)
-Retrieve a passkey configuration for an Identity Toolkit project. - -Args: - name: string, Required. The resource name of the config, for example: 'projects/my-awesome-project/passkeyConfig'. (required) - x__xgafv: string, V1 error format. - Allowed values - 1 - v1 error format - 2 - v2 error format - -Returns: - An object of the form: - - { # Configuration for signing in users using passkeys. - "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys. - "A String", - ], - "name": "A String", # Required. The name of the PasskeyConfig resource. - "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created. -}--updateConfig(name, body=None, updateMask=None, x__xgafv=None)
Update an Identity Toolkit project configuration. @@ -792,39 +763,4 @@Method Details
}--updatePasskeyConfig(name, body=None, updateMask=None, x__xgafv=None)
-Update a passkey configuration for an Identity Toolkit project. - -Args: - name: string, Required. The name of the PasskeyConfig resource. (required) - body: object, The request body. - The object takes the form of: - -{ # Configuration for signing in users using passkeys. - "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys. - "A String", - ], - "name": "A String", # Required. The name of the PasskeyConfig resource. - "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created. -} - - updateMask: string, Optional. The update mask applies to the resource. Empty update mask will result in updating nothing. For the `FieldMask` definition, see https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask - x__xgafv: string, V1 error format. - Allowed values - 1 - v1 error format - 2 - v2 error format - -Returns: - An object of the form: - - { # Configuration for signing in users using passkeys. - "expectedOrigins": [ # Required. The website or app origins associated with the customer's sites or apps. Only challenges signed from these origins will be allowed to sign in with passkeys. - "A String", - ], - "name": "A String", # Required. The name of the PasskeyConfig resource. - "rpId": "A String", # Required. The relying party ID for the purpose of passkeys verifications. This cannot be changed once created. -}-
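With `getPasskeyConfig` and `updatePasskeyConfig` removed in this revision, `identitytoolkit_v2.projects` is left with `getConfig` and `updateConfig` for project-level settings. A minimal sketch of both, assuming admin OAuth credentials; the project ID and the masked field are hypothetical.

```python
from googleapiclient.discovery import build

# Assumes admin OAuth credentials via Application Default Credentials.
idtk = build("identitytoolkit", "v2")

# Read the project configuration (the passkey-specific getter is gone).
config = idtk.projects().getConfig(
    name="projects/my-awesome-project/config"  # hypothetical project
).execute()
print(config.get("name"))

# updateConfig works as before: only fields named in updateMask are changed.
idtk.projects().updateConfig(
    name="projects/my-awesome-project/config",
    updateMask="autodeleteAnonymousUsers",  # hypothetical field to update
    body={"autodeleteAnonymousUsers": True},
).execute()
```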